Example 1

# coding:utf-8
import os
# Use GPU card 0
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import paddlex as pdx
from paddlex import transforms as T

# Define the transforms for training and validation
train_transforms = T.Compose([
    T.MixupImage(mixup_epoch=-1),
    T.RandomDistort(),
    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]),
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.BatchRandomResize(target_sizes=[
        320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704, 736,
        768
    ],
                        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    T.Resize(target_size=640, interp='CUBIC'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Define the datasets used for training and validation
train_dataset = pdx.datasets.VOCDetection(
    data_dir='/home/aistudio/dataset',
    file_list='/home/aistudio/dataset/train_list.txt',
    label_list='/home/aistudio/dataset/labels.txt',
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)
Example 2
# coding:utf-8
import os
# Use GPU card 0
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import paddlex as pdx
from paddlex import transforms as T

# Define the transforms for training and validation
train_transforms = T.Compose([
    T.BatchRandomResize(target_sizes=[
        320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704, 736,
        768
    ],
                        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    T.Resize(target_size=640, interp='CUBIC'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Define the datasets used for training and validation
train_dataset = pdx.datasets.VOCDetection(
    data_dir='/home/aistudio/dataset',
    file_list='/home/aistudio/dataset/train_list.txt',
    label_list='/home/aistudio/dataset/labels.txt',
    transforms=train_transforms,
    num_workers=0,
    shuffle=True)
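
The snippet above stops after the training dataset is defined. Below is a minimal, hedged sketch of how such a PaddleX 2.x detection pipeline is commonly completed; the validation list path, the PPYOLOv2 model choice, and every hyperparameter are assumptions added for illustration, not part of the original example.

# Hedged continuation (not in the original example): eval dataset, model and training call.
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='/home/aistudio/dataset',
    file_list='/home/aistudio/dataset/val_list.txt',  # assumed validation list path
    label_list='/home/aistudio/dataset/labels.txt',
    transforms=eval_transforms,
    shuffle=False)

num_classes = len(train_dataset.labels)
model = pdx.det.PPYOLOv2(num_classes=num_classes,
                         backbone='ResNet50_vd_dcn')  # assumed model choice

model.train(num_epochs=270,                # assumed value
            train_dataset=train_dataset,
            train_batch_size=8,            # assumed value
            eval_dataset=eval_dataset,
            learning_rate=0.000125,        # assumed value
            save_interval_epochs=5,
            save_dir='output/ppyolov2_r50vd_dcn',
            use_vdl=True)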
Example 3

import paddlex as pdx
from paddlex import transforms as T

# Define the preprocessing transforms
train_transforms = T.Compose([
    T.Resize(target_size=[128, 800], interp='LINEAR', keep_ratio=False),
    T.RandomHorizontalFlip(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

eval_transforms = T.Compose([
    T.Resize(target_size=[128, 800], interp='LINEAR', keep_ratio=False),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# Define the datasets
train_dataset = pdx.datasets.SegDataset(data_dir='steel',
                                        file_list='steel/train_list.txt',
                                        label_list='steel/labels.txt',
                                        transforms=train_transforms,
                                        num_workers='auto',
                                        shuffle=True)

eval_dataset = pdx.datasets.SegDataset(data_dir='steel',
                                       file_list='steel/val_list.txt',
                                       label_list='steel/labels.txt',
                                       transforms=eval_transforms,
                                       shuffle=False)

# Define the model
num_classes = len(train_dataset.labels)
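
The example is cut off right after num_classes is computed. A hedged sketch of a possible continuation follows; the UNet model and all hyperparameter values are illustrative assumptions rather than the original author's configuration.

# Hedged continuation (not in the original example): model definition and training call.
model = pdx.seg.UNet(num_classes=num_classes)  # assumed model choice

model.train(num_epochs=100,                # assumed value
            train_dataset=train_dataset,
            train_batch_size=4,            # assumed value
            eval_dataset=eval_dataset,
            learning_rate=0.01,            # assumed value
            save_interval_epochs=5,
            save_dir='output/unet',
            use_vdl=True)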
Example 4
import paddlex as pdx
from paddlex import transforms as T

# Define the transforms for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
train_transforms = T.Compose([
    T.RandomResizeByShort(short_sizes=[640, 672, 704, 736, 768, 800],
                          max_size=1333,
                          interp='CUBIC'),
    T.RandomHorizontalFlip(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    T.ResizeByShort(short_size=800, max_size=1333, interp='CUBIC'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Define the datasets used for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/paddlex/cv/datasets/coco.py#L26
train_dataset = pdx.datasets.CocoDetection(data_dir='dataset/JPEGImages',
                                           ann_file='dataset/train.json',
                                           transforms=train_transforms,
                                           shuffle=True)
eval_dataset = pdx.datasets.CocoDetection(data_dir='dataset/JPEGImages',
                                          ann_file='dataset/val.json',
                                          transforms=eval_transforms)

# Initialize the model and start training
# Training metrics can be visualized with VisualDL; see https://github.com/PaddlePaddle/PaddleX/tree/release/2.0-rc/tutorials/train#visualdl可视化训练指标
num_classes = len(train_dataset.labels)
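
This example also ends before a model is created. The sketch below shows one plausible continuation; FasterRCNN is assumed only because the multi-scale short-side resizing above matches its usual recipe, and all hyperparameters are illustrative.

# Hedged continuation (not in the original example): model definition and training call.
model = pdx.det.FasterRCNN(num_classes=num_classes,
                           backbone='ResNet50')  # assumed model choice

model.train(num_epochs=12,                 # assumed value
            train_dataset=train_dataset,
            train_batch_size=2,            # assumed value
            eval_dataset=eval_dataset,
            learning_rate=0.0025,          # assumed value
            save_interval_epochs=1,
            save_dir='output/faster_rcnn_r50',
            use_vdl=True)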
Example 5
import paddlex as pdx
from paddlex import transforms as T

# Define the transforms for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
train_transforms = T.Compose([
    T.Resize(target_size=512),
    T.RandomHorizontalFlip(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

eval_transforms = T.Compose([
    T.Resize(target_size=512),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

# Download and extract the meter segmentation dataset; if it has already been downloaded, comment out the next two lines
meter_seg_dataset = 'https://bj.bcebos.com/paddlex/examples/meter_reader/datasets/meter_seg.tar.gz'
pdx.utils.download_and_decompress(meter_seg_dataset, path='./')

# Define the datasets used for training and validation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/datasets/seg_dataset.py#L22
train_dataset = pdx.datasets.SegDataset(data_dir='meter_seg',
                                        file_list='meter_seg/train.txt',
                                        label_list='meter_seg/labels.txt',
                                        transforms=train_transforms,
                                        shuffle=True)

eval_dataset = pdx.datasets.SegDataset(data_dir='meter_seg',
                                       file_list='meter_seg/val.txt',
                                       label_list='meter_seg/labels.txt',
                                       transforms=eval_transforms,
                                       shuffle=False)
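
The original example ends before any model is defined. A hedged sketch of the remaining steps follows; the DeepLabV3P choice and all hyperparameter values are illustrative assumptions.

# Hedged continuation (not in the original example): model definition and training call.
num_classes = len(train_dataset.labels)
model = pdx.seg.DeepLabV3P(num_classes=num_classes,
                           backbone='ResNet50_vd')  # assumed model choice

model.train(num_epochs=20,                 # assumed value
            train_dataset=train_dataset,
            train_batch_size=4,            # assumed value
            eval_dataset=eval_dataset,
            learning_rate=0.01,            # assumed value
            save_interval_epochs=5,
            save_dir='output/deeplabv3p_r50vd',
            use_vdl=True)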