Code example #1
import numpy as np
import tqdm


# `im_detect_bbox_aug` and `config` are provided by the surrounding project
def validate(net, nms_wrapper, ctx):
    # Get validation dataset
    TEST_SCALES = [[960, 1280]]
    from dataset.aluminum_material import AluminumDet
    from gluoncv.utils.metrics.voc_detection import VOCMApMetric
    val_dataset = AluminumDet(is_train=False)
    metric = VOCMApMetric(iou_thresh=.5, class_names=[str(x) for x in range(len(val_dataset.classes))])
    for i in tqdm.tqdm(range(len(val_dataset))):
        im_name, gt_boxes = val_dataset.at_with_image_path(i)
        bboxes, scores, labels = im_detect_bbox_aug(net, nms_wrapper, im_name, TEST_SCALES, config.network.PIXEL_MEANS,
                                                    config.TRAIN.BBOX_STDS,
                                                    ctx=ctx, viz=False)
        gt_labels = gt_boxes[np.newaxis, :, 4]
        metric.update(pred_bboxes=bboxes[np.newaxis], pred_labels=labels[np.newaxis] - 1,
                      pred_scores=scores[np.newaxis], gt_bboxes=gt_boxes[np.newaxis, :, :4],
                      gt_labels=gt_labels, gt_difficults=np.zeros_like(gt_labels))
    classes_mAP = dict(zip(*metric.get()))
    classes_mAPChinese = {}

    for k, v in classes_mAP.items():
        if k == "mAP":
            classes_mAPChinese[k] = v
        else:
            classes_mAPChinese[val_dataset.classes[int(k)]] = v
    for k, v in classes_mAPChinese.items():
        print(k, v)
    return classes_mAPChinese["mAP"]
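
Every example on this page feeds batched prediction and ground-truth arrays into GluonCV's VOCMApMetric. As a standalone reference, here is a minimal sketch of the metric's update/get contract; the class names and box coordinates are made-up dummy values.

import numpy as np
from gluoncv.utils.metrics.voc_detection import VOCMApMetric

# hypothetical two-class setup; all arrays are batched as (batch, num_boxes, ...)
metric = VOCMApMetric(iou_thresh=0.5, class_names=['cat', 'dog'])
pred_bboxes = np.array([[[10., 10., 50., 50.]]])  # (1, 1, 4) as xmin, ymin, xmax, ymax
pred_labels = np.array([[0]])                     # class index per predicted box
pred_scores = np.array([[0.9]])                   # confidence per predicted box
gt_bboxes = np.array([[[12., 12., 48., 48.]]])
gt_labels = np.array([[0]])
metric.update(pred_bboxes, pred_labels, pred_scores, gt_bboxes, gt_labels)
names, values = metric.get()                      # per-class AP entries followed by 'mAP'
print(dict(zip(names, values)))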
Code example #2
from os.path import join as pjoin

from gluoncv.utils.metrics.voc_detection import VOCMApMetric


# RealDataset, RealGraspDataset, SynthDataset, SpecSynthDataset, SpecRealDataset
# and `cfg` are provided by the surrounding project
def get_dataset(dataset, mixup=False, tclass=None):

    if dataset.lower() == 'real':
        train_dataset = RealDataset(mode='train')
        val_dataset = RealDataset(mode='test')
    elif dataset.lower() == 'real_with_grasp':
        train_dataset = RealGraspDataset(mode='train')
        val_dataset = RealGraspDataset(mode='test')
    elif dataset.lower() == 'synth_spec':
        train_dataset = SpecSynthDataset(tclass=tclass, root=pjoin(cfg.dataset_folder, 'synth_small_bg'), mode='all')
        val_dataset = SpecRealDataset(tclass=tclass, mode='all')
    elif dataset.lower() == 'synth_small_printer':
        train_dataset = SynthDataset(root=pjoin(cfg.dataset_folder, 'synth_small_printer'), mode='all')
        val_dataset = RealDataset(mode='test')
    elif dataset.lower() == 'synth_part2':
        train_dataset = SpecSynthDataset(root=pjoin(cfg.dataset_folder, 'synth_small_printer'), tclass='part2', mode='all')
        val_dataset = SpecRealDataset(mode='all', tclass='part2')
    elif dataset.lower() == 'synth_part3':
        train_dataset = SpecSynthDataset(root=pjoin(cfg.dataset_folder, 'synth_small_printer'), tclass='part3', mode='all')
        val_dataset = SpecRealDataset(mode='all', tclass='part3')
    elif dataset.lower() == 'synth_dosing_nozzle':
        train_dataset = SpecSynthDataset(root=pjoin(cfg.dataset_folder, 'synth_small_printer'), tclass='part3', mode='all')
        val_dataset = SpecRealDataset(mode='all', tclass='dosing_nozzle')
    elif dataset.split('_')[0] == 'synth':
        train_dataset = SynthDataset(root=pjoin(cfg.dataset_folder, dataset), mode='all')
        val_dataset = RealDataset(mode='test')
    else:
        raise ValueError("Unknown dataset: '{}'".format(dataset))

    val_metric = VOCMApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    
    if mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)

    return train_dataset, val_dataset, val_metric
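
A hypothetical call, assuming the project's dataset classes are importable; it returns the train/val datasets plus a ready-to-use VOCMApMetric.

# hypothetical usage: train on synthetic part2 data, validate on the matching real split
train_dataset, val_dataset, val_metric = get_dataset('synth_part2', mixup=True)
print(len(train_dataset), len(val_dataset), val_metric.name)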
Code example #3
from gluoncv.utils.metrics.voc_detection import VOCMApMetric


# CycleDataset is provided by the surrounding project
def get_dataset(args):

    train_dataset = CycleDataset(root='data/filtered/',
                                 split_id='001',
                                 split="train",
                                 cache_frames=True,
                                 percent=0.1)
    val_dataset = CycleDataset(root='data/filtered/',
                               split_id='001',
                               split="val",
                               cache_frames=True,
                               percent=0.1)

    val_metric = VOCMApMetric(iou_thresh=0.5, class_names=val_dataset.classes)

    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code example #4
import logging

import gluoncv as gcv
import mxnet as mx
from gluoncv.utils.metrics.voc_detection import VOCMApMetric

# assumption: the validation set is read from a record file; the training set
# (`dataset`, passed to get_dataloader below) is presumably loaded the same way
val_dataset = gcv.data.RecordFileDetection(
    '../data_set_files/record_format_files/data-set_min/val.rec')
classes = [
    'car', 'articulated_truck', 'bus', 'bicycle', 'motorcycle',
    'motorized_vehicle', 'pedestrian', 'single_unit_truck', 'work_van',
    'pickup_truck', 'non-motorized_vehicle'
]  # the 11 MIO-TCD foreground classes

# image, label = dataset[0]
# print('label:', label)
# display image and label
# ax = viz.plot_bbox(image, bboxes=label[:, :4], labels=label[:, 4:5], class_names=classes)
# plt.show()

net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_voc', pretrained=True)
net.reset_class(classes)
eval_metric = VOCMApMetric(iou_thresh=0.5, class_names=classes)
# net.load_parameters('../trained_model_weights/ssd_resnet/epoch_35_ssd_512_resnet50_v1_voc_mio_tcd.params')
train_data, val_data = get_dataloader(net, dataset, val_dataset, 512, 4, 0)

# use GPU 0 if it is available, otherwise fall back to CPU
try:
    a = mx.nd.zeros((1, ), ctx=mx.gpu(0))
    ctx = [mx.gpu(0)]
except mx.MXNetError:
    ctx = [mx.cpu()]

prefix = './train_logs/ssd_resnet/ssd_resnet'
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = prefix + '_train.log'
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
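
The get_dataloader helper called above is not shown in this snippet. Below is a sketch of what it usually looks like, modeled on GluonCV's standard SSD training script; treat it as an assumption about this project's helper rather than its actual code.

import mxnet as mx
from mxnet import autograd, gluon
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform, SSDDefaultValTransform


def get_dataloader(net, train_dataset, val_dataset, data_shape, batch_size, num_workers):
    width, height = data_shape, data_shape
    # run a fake forward pass to obtain the fixed anchors used for target generation
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width)))
    train_batchify = Tuple(Stack(), Stack(), Stack())  # image, cls_targets, box_targets
    train_loader = gluon.data.DataLoader(
        train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
        batch_size, shuffle=True, batchify_fn=train_batchify,
        last_batch='rollover', num_workers=num_workers)
    val_batchify = Tuple(Stack(), Pad(pad_val=-1))  # image, padded labels
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(SSDDefaultValTransform(width, height)),
        batch_size, shuffle=False, batchify_fn=val_batchify,
        last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader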
Code example #5
import pandas as pd

from gluoncv.utils.metrics.voc_detection import VOCMApMetric

# `calc_map`, `data`, `val_ds`, `cfg`, `datasets`, `models`, `datasets_form`,
# `models_form`, `out_folder` and `save_text` are defined in earlier notebook cells


def calc_map_all(data, size_map, metric):
    out = dict()
    for dataset in data.keys():
        out[dataset] = dict()
        for model in data[dataset].keys():
            out[dataset][model] = dict()
            # NOTE: `size_map` is not used here; every image is assumed to be 800x600
            mAP = calc_map(data[dataset][model].preds,
                           data[dataset][model].labels, (800, 600), metric)
            for cls_, val in zip(*mAP):
                out[dataset][model][cls_] = val
    return out


# %%
metric = VOCMApMetric(iou_thresh=0.5, class_names=val_ds.classes)
mAP_map = calc_map_all(data, size_map, metric)
# %%
out = mAP_map
df = pd.DataFrame({(i, j): out[i][j] for i in datasets for j in models})
df

# %%
datasets_form0 = [datasets_form[0], datasets_form[2], datasets_form[1]]
df.index = cfg.abre_classes + ['mAP']
df.columns.set_levels(datasets_form0, level=0, inplace=True)
df.columns.set_levels(models_form, level=1, inplace=True)
df

# %%
# save_text((df * 100).round(1).to_latex(), out_folder + '/map_table_test.txt')
Code example #6
File: eval_faster_rcnn.py    Project: dmxj/my_study
import mxnet as mx
from gluoncv import data as gdata
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.model_zoo import get_model
from gluoncv.utils.metrics.voc_detection import VOCMApMetric

ctx = mx.cpu()
model_path = "/Users/rensike/.mxnet/models/faster_rcnn_resnet50_v1b_voc-447328d8.params"
batch_size = 32
num_workers = 0

# init model
net = get_model("faster_rcnn_resnet50_v1b_voc",
                pretrained=False,
                pretrained_base=False)
net.load_parameters(model_path)
net.collect_params().reset_ctx(ctx)

# load val dataset
val_dataset = gdata.VOCDetection(root="/Users/rensike/Files/temp/voc_mini",
                                 splits=[(0, 'val')])
eval_metric = VOCMApMetric(iou_thresh=0.5, class_names=val_dataset.classes)

# load val dataloader
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
val_data_loader = mx.gluon.data.DataLoader(
    val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),
    batch_size=1,
    shuffle=False,
    batchify_fn=val_bfn,
    last_batch='keep',
    num_workers=num_workers)

# do evaluate
eval_metric.reset()
net.hybridize(static_alloc=True)
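
The snippet stops just before the evaluation loop. A minimal sketch of how it typically continues, following GluonCV's standard Faster R-CNN validation pattern (single context, batch size 1; `gcv` is assumed to be `import gluoncv as gcv`):

import gluoncv as gcv

clipper = gcv.nn.bbox.BBoxClipToImage()
for batch in val_data_loader:
    # each element of `batch` is a length-1 list because of batchify.Append()
    x, y, im_scale = [d[0].as_in_context(ctx) for d in batch]
    ids, scores, bboxes = net(x)                    # forward pass
    scale = im_scale.reshape((-1)).asscalar()
    det_bboxes = clipper(bboxes, x) * scale         # clip to the image, rescale to original size
    gt_ids = y.slice_axis(axis=-1, begin=4, end=5)
    gt_bboxes = y.slice_axis(axis=-1, begin=0, end=4) * scale
    gt_difficults = y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None
    eval_metric.update(det_bboxes, ids, scores, gt_bboxes, gt_ids, gt_difficults)

names, values = eval_metric.get()
for name, value in zip(names, values):
    print(name, value)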