# Training-time detection transforms: MixUp, photometric/geometric
# augmentation, multi-scale batch resize, then normalization.
# NOTE(review): mixup_epoch=-1 looks like it disables MixUp — confirm intended.
_voc_train_ops = [
    T.MixupImage(mixup_epoch=-1),
    T.RandomDistort(),
    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]),
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544,
                      576, 608, 640, 672, 704, 736, 768],
        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
train_transforms = T.Compose(_voc_train_ops)

# Evaluation-time transforms: deterministic 640-px cubic resize followed by
# the same normalization used for training.
_voc_eval_resize = T.Resize(target_size=640, interp='CUBIC')
_voc_eval_norm = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
eval_transforms = T.Compose([_voc_eval_resize, _voc_eval_norm])

# Training dataset (VOC-format detection annotations); shuffled each epoch.
train_dataset = pdx.datasets.VOCDetection(
    shuffle=True,
    transforms=train_transforms,
    data_dir='/home/aistudio/dataset',
    file_list='/home/aistudio/dataset/train_list.txt',
    label_list='/home/aistudio/dataset/labels.txt')
# Validation dataset (VOC-format detection annotations).
# NOTE(review): the original call was truncated mid-argument list (a scrape
# splice inserted import lines); closed it here with shuffle=False so the
# statement is syntactically complete and validation order is deterministic.
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='/home/aistudio/dataset',
    file_list='/home/aistudio/dataset/val_list.txt',
    label_list='/home/aistudio/dataset/labels.txt',
    transforms=eval_transforms,
    shuffle=False)
import paddlex as pdx
from paddlex import transforms as T

# Training-time preprocessing for the steel segmentation data: fixed
# 128x800 linear resize, random horizontal flip, Normalize(mean=std=0.5).
_steel_resize = T.Resize(target_size=[128, 800], interp='LINEAR',
                         keep_ratio=False)
_steel_norm = T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
train_transforms = T.Compose([_steel_resize, T.RandomHorizontalFlip(),
                              _steel_norm])

# Evaluation-time preprocessing: same deterministic resize + normalization,
# without the random flip used during training.
_steel_eval_ops = [
    T.Resize(target_size=[128, 800], interp='LINEAR', keep_ratio=False),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]
eval_transforms = T.Compose(_steel_eval_ops)

# Training split of the steel segmentation dataset; shuffled each epoch,
# with num_workers='auto' leaving worker-count selection to PaddleX.
train_dataset = pdx.datasets.SegDataset(
    data_dir='steel',
    file_list='steel/train_list.txt',
    label_list='steel/labels.txt',
    transforms=train_transforms,
    num_workers='auto',
    shuffle=True)

# Validation split: same layout as training, but deterministic order.
eval_dataset = pdx.datasets.SegDataset(
    shuffle=False,
    transforms=eval_transforms,
    data_dir='steel',
    file_list='steel/val_list.txt',
    label_list='steel/labels.txt')

# Define the model: derive the number of classes from the dataset's labels.
num_classes = len(train_dataset.labels)
# Example 3 (scrape artifact "예제 #3" / stray "0" converted to a comment)
# Training-time transforms for meter detection: MixUp for the first 250
# epochs, standard augmentation, multi-scale batch resize, normalization.
# API reference:
# https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
_meter_det_train_ops = [
    T.MixupImage(mixup_epoch=250),
    T.RandomDistort(),
    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]),
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
train_transforms = T.Compose(_meter_det_train_ops)

# Evaluation-time transforms: deterministic 608-px cubic resize + normalize.
_meter_det_eval_ops = [
    T.Resize(608, interp='CUBIC'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
eval_transforms = T.Compose(_meter_det_eval_ops)

# Download and extract the meter-detection dataset; if it was downloaded
# beforehand, comment out the next two lines.
meter_det_dataset = 'https://bj.bcebos.com/paddlex/examples/meter_reader/datasets/meter_det.tar.gz'
pdx.utils.download_and_decompress(meter_det_dataset, path='./')

# Training dataset (COCO-format detection annotations); shuffled each epoch.
# API: https://github.com/PaddlePaddle/PaddleX/blob/develop/paddlex/cv/datasets/coco.py#L26
train_dataset = pdx.datasets.CocoDetection(
    shuffle=True,
    transforms=train_transforms,
    data_dir='meter_det/train/',
    ann_file='meter_det/annotations/instance_train.json')
# Validation dataset (COCO-format detection annotations).
# NOTE(review): the original call was cut off by a scrape artifact
# ("예제 #4" / "0"); completed here to mirror the training dataset,
# following the published PaddleX meter_reader example — verify the
# data_dir/ann_file paths against the extracted meter_det archive.
eval_dataset = pdx.datasets.CocoDetection(
    data_dir='meter_det/test/',
    ann_file='meter_det/annotations/instance_test.json',
    transforms=eval_transforms,
    shuffle=False)
import paddlex as pdx
from paddlex import transforms as T

# Training-time transforms for meter segmentation: 512 resize, random
# horizontal flip, Normalize(mean=std=0.5).
# API reference:
# https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
_meter_seg_train_ops = [
    T.Resize(target_size=512),
    T.RandomHorizontalFlip(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]
train_transforms = T.Compose(_meter_seg_train_ops)

# Evaluation-time transforms: same pipeline minus the random flip.
_meter_seg_eval_ops = [
    T.Resize(target_size=512),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
]
eval_transforms = T.Compose(_meter_seg_eval_ops)

# Download and extract the meter (pointer/scale) segmentation dataset; if it
# was downloaded beforehand, comment out the next two lines.
meter_seg_dataset = 'https://bj.bcebos.com/paddlex/examples/meter_reader/datasets/meter_seg.tar.gz'
pdx.utils.download_and_decompress(meter_seg_dataset, path='./')

# Training dataset for meter segmentation; shuffled each epoch.
# API: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/datasets/seg_dataset.py#L22
train_dataset = pdx.datasets.SegDataset(
    shuffle=True,
    transforms=train_transforms,
    data_dir='meter_seg',
    file_list='meter_seg/train.txt',
    label_list='meter_seg/labels.txt')

eval_dataset = pdx.datasets.SegDataset(data_dir='meter_seg',
                                       file_list='meter_seg/val.txt',
                                       label_list='meter_seg/labels.txt',