def train(model_dir, sensitivities_file, eval_metric_loss):
    """Train a PaddleX YOLOv3 detector on a VOC-format dataset.

    Args:
        model_dir: directory with pretrained weights to resume from, or
            None to start from the "IMAGENET" weights shipped with PaddleX.
        sensitivities_file: pruning sensitivities file path, the string
            'DEFAULT', or None to train without pruning.
        eval_metric_loss: tolerance forwarded unchanged to ``model.train``.
    """
    # Augmentation pipeline applied only while training.
    train_transforms = transforms.Compose([
        transforms.MixupImage(mixup_epoch=250),
        transforms.RandomDistort(),
        transforms.RandomExpand(),
        transforms.RandomCrop(),
        transforms.Resize(target_size=608, interp='RANDOM'),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize(),
    ])
    # Deterministic preprocessing for evaluation.
    eval_transforms = transforms.Compose([
        transforms.Resize(target_size=608, interp='CUBIC'),
        transforms.Normalize(),
    ])

    # VOC-format datasets for training and validation.
    train_dataset = pdx.datasets.VOCDetection(
        data_dir='dataset',
        file_list='dataset/train_list.txt',
        label_list='dataset/labels.txt',
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.VOCDetection(
        data_dir='dataset',
        file_list='dataset/val_list.txt',
        label_list='dataset/labels.txt',
        transforms=eval_transforms)

    # Resolve the source of the pretrained weights.
    if model_dir is None:
        # No checkpoint given: use ImageNet-pretrained weights.
        pretrain_weights = "IMAGENET"
    else:
        assert os.path.isdir(model_dir), "Path {} is not a directory".format(
            model_dir)
        pretrain_weights = model_dir

    # Pruning runs write to a separate output directory.
    save_dir = "output/yolov3_mobile"
    if sensitivities_file is not None:
        if sensitivities_file != 'DEFAULT':
            assert os.path.exists(
                sensitivities_file), "Path {} not exist".format(
                    sensitivities_file)
        save_dir = "output/yolov3_mobile_prune"

    model = pdx.det.YOLOv3(num_classes=len(train_dataset.labels))
    model.train(num_epochs=400,
                train_dataset=train_dataset,
                train_batch_size=10,
                eval_dataset=eval_dataset,
                learning_rate=0.0001,
                lr_decay_epochs=[310, 350],
                pretrain_weights=pretrain_weights,
                save_dir=save_dir,
                use_vdl=True,
                sensitivities_file=sensitivities_file,
                eval_metric_loss=eval_metric_loss)
# ---- Example #2 ----
import os

import paddlex as pdx
from paddlex.det import transforms

# Root directory of the VOC-format dataset; list and label files live under it.
base = './data/'

# Augmentation pipeline applied only while training.
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(target_size=512, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize(),
])

# Deterministic preprocessing for evaluation.
eval_transforms = transforms.Compose([
    transforms.Resize(target_size=512, interp='CUBIC'),
    transforms.Normalize(),
])

# Datasets for training and validation. Every path is derived from `base`
# so the dataset location is configured in exactly one place.
train_dataset = pdx.datasets.VOCDetection(
    data_dir=base,
    file_list=os.path.join(base, 'train.txt'),
    label_list=os.path.join(base, 'labels.txt'),
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir=base,
    file_list=os.path.join(base, 'valid.txt'),
    label_list=os.path.join(base, 'labels.txt'),
    transforms=eval_transforms)
# ---- Example #3 ----
import os

# Pin the run to the first GPU; must be set before the framework is imported.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import paddlex as pdx
import paddle.fluid as fluid
from paddlex.det import transforms

# Augmentations applied during training.
train_transforms = transforms.Compose([
    transforms.MixupImage(alpha=1.5, beta=1.5, mixup_epoch=-1),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(target_size=480),
    transforms.RandomHorizontalFlip(prob=0.5),
    transforms.Normalize(),
])

# Deterministic preprocessing for evaluation.
eval_transforms = transforms.Compose([
    transforms.Resize(target_size=480),
    transforms.Normalize(),
])

# Load the VOC-format detection datasets.
train_dataset = pdx.datasets.VOCDetection(data_dir='./dataset',
                                          file_list='./dataset/train_list.txt',
                                          label_list='./dataset/label_list.txt',
                                          transforms=train_transforms)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='./dataset',
    file_list='./dataset/dev_list.txt',
    label_list='./dataset/label_list.txt',
    transforms=eval_transforms)
# ---- Example #4 ----
import paddlex as pdx
from paddlex.det import transforms

# Preprocessing pipelines for training and evaluation.
# API reference:
# https://github.com/PaddlePaddle/PaddleX/blob/release/1.3.11/paddlex/cv/transforms/operators.py
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=-1),
    transforms.RandomDistort(),
    transforms.RandomExpand(),
    transforms.RandomCrop(),
    transforms.Resize(target_size=480, interp='RANDOM'),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize(),
])

eval_transforms = transforms.Compose([
    transforms.Resize(target_size=480, interp='CUBIC'),
    transforms.Normalize(),
])

# VOC-format training dataset (rebar-counting data).
# API reference:
# https://github.com/PaddlePaddle/PaddleX/blob/release/1.3.11/paddlex/cv/datasets/voc.py
train_dataset = pdx.datasets.VOCDetection(
    data_dir='work/dataset_reinforcing_steel_bar_counting',
    file_list='work/dataset_reinforcing_steel_bar_counting/train_list.txt',
    label_list='work/dataset_reinforcing_steel_bar_counting/label_list.txt',
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdx.datasets.VOCDetection(
    data_dir='work/dataset_reinforcing_steel_bar_counting',