Example #1
import paddlex as pdx  # assumed import; `data_dir` and `output_dir` are expected to be defined by the enclosing script


def train():
    from paddlex.cls import transforms
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])

    train_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/mini_imagenet_veg',
        file_list=f'{data_dir}/mini_imagenet_veg/train_list.txt',
        label_list=f'{data_dir}/mini_imagenet_veg/labels.txt',
        transforms=train_transforms)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/mini_imagenet_veg',
        file_list=f'{data_dir}/mini_imagenet_veg/val_list.txt',
        label_list=f'{data_dir}/mini_imagenet_veg/labels.txt',
        transforms=eval_transforms)

    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV3_large_ssld(num_classes=num_classes)
    model.train(num_epochs=12,
                train_dataset=train_dataset,
                train_batch_size=32,
                eval_dataset=eval_dataset,
                lr_decay_epochs=[6, 8],
                save_interval_epochs=1,
                learning_rate=0.00625,
                save_dir=f'{output_dir}/output/mobilenetv3_large_ssld',
                use_vdl=True)
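A possible follow-up, not part of the original example: reload the trained classifier and run a single prediction. The best_model subdirectory and the test image path are assumptions, not taken from the source.

import paddlex as pdx

# Load the best checkpoint saved during training (directory name is an assumption;
# adjust it to the save_dir used above)
model = pdx.load_model('output/mobilenetv3_large_ssld/best_model')
# Predict a single image; the classifier returns the top category and its score
result = model.predict('test.jpg')
print(result)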
Example #2
import paddlex as pdx  # assumed import; `data_dir` and `output_dir` are expected to be defined by the enclosing script


def train():
    from paddlex.cls import transforms
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])

    train_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/vegetables_cls',
        file_list=f'{data_dir}/vegetables_cls/train_list.txt',
        label_list=f'{data_dir}/vegetables_cls/labels.txt',
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/vegetables_cls',
        file_list=f'{data_dir}/vegetables_cls/val_list.txt',
        label_list=f'{data_dir}/vegetables_cls/labels.txt',
        transforms=eval_transforms)

    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV2(num_classes=num_classes)
    model.train(num_epochs=10,
                train_dataset=train_dataset,
                train_batch_size=32,
                eval_dataset=eval_dataset,
                lr_decay_epochs=[4, 6, 8],
                save_interval_epochs=1,
                learning_rate=0.025,
                save_dir=f'{output_dir}/mobilenetv2',
                use_vdl=True)
Example #3
def CBGetNewCompose():
    global AllCompose
    global BchangeState
    BchangeState = True
    AllCompose = transforms.Compose([
        transforms.RandomDistort(brightness_range=Brightness,
                                 brightness_prob=1,
                                 contrast_range=Contrastness,
                                 contrast_prob=1,
                                 saturation_range=Saturation,
                                 saturation_prob=1,
                                 hue_range=Hue,
                                 hue_prob=1)
    ])
    if globalimg is not None:
        if State is False:
            postimg()
            draw_img(transimg)
Example #4
def build_transforms(self, transforms_info, to_rgb=True):
    if self.model_type == "classifier":
        from paddlex.cls import transforms
    elif self.model_type == "detector":
        from paddlex.det import transforms
    elif self.model_type == "segmenter":
        from paddlex.seg import transforms
    op_list = list()
    for op_info in transforms_info:
        op_name = list(op_info.keys())[0]
        op_attr = op_info[op_name]
        if not hasattr(transforms, op_name):
            raise Exception(
                "There's no operator named '{}' in transforms of {}".format(
                    op_name, self.model_type))
        op_list.append(getattr(transforms, op_name)(**op_attr))
    eval_transforms = transforms.Compose(op_list)
    if hasattr(eval_transforms, 'to_rgb'):
        eval_transforms.to_rgb = to_rgb
    self.arrange_transforms(eval_transforms)
    return eval_transforms
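For clarity, a standalone sketch of the transforms_info layout that build_transforms expects: a list of single-key dicts mapping an operator name to its keyword arguments. The loop below mirrors the method body for the classifier case; the concrete operator choices are assumptions taken from the other examples on this page.

from paddlex.cls import transforms

transforms_info = [
    {'ResizeByShort': {'short_size': 256}},
    {'CenterCrop': {'crop_size': 224}},
    {'Normalize': {}},
]
# Resolve each {op_name: kwargs} entry to a transform instance, as build_transforms does
op_list = [getattr(transforms, name)(**attrs)
           for op in transforms_info
           for name, attrs in op.items()]
eval_transforms = transforms.Compose(op_list)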
Example #5
import os

import paddlex as pdx
from paddlex.cls import transforms


def train(model_dir=None, sensitivities_file=None, eval_metric_loss=0.05):
    # Download and decompress the vegetable classification dataset
    veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
    pdx.utils.download_and_decompress(veg_dataset, path='./')

    # Define the transforms for training and validation
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])

    # Define the datasets used for training and validation
    train_dataset = pdx.datasets.ImageNet(
        data_dir='vegetables_cls',
        file_list='vegetables_cls/train_list.txt',
        label_list='vegetables_cls/labels.txt',
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir='vegetables_cls',
        file_list='vegetables_cls/val_list.txt',
        label_list='vegetables_cls/labels.txt',
        transforms=eval_transforms)

    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV2(num_classes=num_classes)

    if model_dir is None:
        # Use model weights pretrained on the ImageNet dataset
        pretrain_weights = "IMAGENET"
    else:
        # Use the provided model_dir as the pretrained weights
        assert os.path.isdir(model_dir), "Path {} is not a directory".format(
            model_dir)
        pretrain_weights = model_dir

    save_dir = './output/mobilenetv2'
    if sensitivities_file is not None:
        # "DEFAULT" means the model's built-in parameter sensitivity file is used as the pruning basis
        if sensitivities_file != "DEFAULT":
            assert os.path.exists(
                sensitivities_file), "Path {} not exist".format(
                    sensitivities_file)
        save_dir = './output/mobilenetv2_prune'

    model.train(
        num_epochs=10,
        train_dataset=train_dataset,
        train_batch_size=32,
        eval_dataset=eval_dataset,
        lr_decay_epochs=[4, 6, 8],
        learning_rate=0.025,
        pretrain_weights=pretrain_weights,
        save_dir=save_dir,
        use_vdl=True,
        sensitivities_file=sensitivities_file,
        eval_metric_loss=eval_metric_loss)
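A hedged usage sketch for the helper above: the first call trains from ImageNet weights, the commented-out call retrains with pruning. The best_model path is an assumption about PaddleX's output layout, not taken from the source.

if __name__ == '__main__':
    # Plain training from ImageNet-pretrained weights
    train()
    # Prune-aware retraining using the model's built-in sensitivity file (path is an assumption)
    # train(model_dir='./output/mobilenetv2/best_model',
    #       sensitivities_file='DEFAULT',
    #       eval_metric_loss=0.05)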
Example #6
# Documentation: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from paddlex.cls import transforms
import paddlex as pdx

# Download and decompress the vegetable classification dataset
# veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
# pdx.utils.download_and_decompress(veg_dataset, path='./')

# Define the transforms for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html
train_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotate(),
    transforms.RandomDistort(),
    transforms.Normalize()
])
# eval_transforms = transforms.Compose([
#     transforms.Normalize()
# ])

# Define the datasets used for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet
train_dataset = pdx.datasets.ImageNet(
    data_dir='/home/aistudio/data/data67498/train',
    file_list='/home/aistudio/data/data67498/train/train_list.txt',
    label_list='/home/aistudio/data/data67498/train/labels.txt',
    transforms=train_transforms,
    shuffle=True)
# eval_dataset = pdx.datasets.ImageNet(
Example #7
import os

import paddlex as pdx
from paddlex.cls import transforms

# `base` (the dataset root directory) is assumed to be defined elsewhere in the original script
with open(os.path.join('train_list.txt'), 'w') as f:
    for i, cls_fold in enumerate(os.listdir(base)):
        cls_base = os.path.join(base, cls_fold)
        files = os.listdir(cls_base)
        print('{} train num:'.format(cls_fold), len(files))
        for pt in files:
            img = os.path.join(cls_fold, pt)
            info = img + ' ' + str(i) + '\n'
            f.write(info)

with open(os.path.join('labels.txt'), 'w') as f:
    for i, cls_fold in enumerate(os.listdir(base)):
        f.write(cls_fold + '\n')

train_transforms = transforms.Compose(
    [transforms.RandomCrop(crop_size=224),
     transforms.Normalize()])

train_dataset = pdx.datasets.ImageNet(data_dir=base,
                                      file_list='train_list.txt',
                                      label_list='labels.txt',
                                      transforms=train_transforms,
                                      shuffle=True)

num_classes = len(train_dataset.labels)
model = pdx.cls.ResNet18(num_classes=num_classes)
model.train(num_epochs=20,
            train_dataset=train_dataset,
            train_batch_size=32,
            lr_decay_epochs=[5, 10, 15],
            learning_rate=2e-2,
Example #8
        [(xmin, ymax), (xmax, ymax)]
    ]
    if color == "R":
        color = (0, 0, 255)
    elif color == "G":
        color = (0, 255, 0)
    elif color == "B":
        color = (255, 0, 0)

    for l in lines:
        cv2.line(img, l[0], l[1], color, 2)


cls_model = pdx.load_model('output/gear_clas/')
cls_trans = cls_transforms.Compose([
    # cls_transforms.Resize(),
    cls_transforms.Normalize()
])

det_model = pdx.load_model('output/gear_det/')
det_trans = det_transforms.Compose([
    det_transforms.Resize(),
    det_transforms.Normalize()
])

vid_dir = "/home/aistudio/plane/vid-split/train/"
itv = 5
bs = 4

for vid_name in tqdm(os.listdir(vid_dir)):
    print(vid_name)
    vidcap = cv2.VideoCapture(osp.join(vid_dir, vid_name))
Example #9
import os
# Use GPU card 0
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import paddle.fluid as fluid
from paddlex.cls import transforms
import paddlex as pdx

# Download and decompress the vegetable classification dataset
veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
pdx.utils.download_and_decompress(veg_dataset, path='./')

# Define the transforms for training and validation
train_transforms = transforms.Compose(
    [transforms.RandomCrop(crop_size=224),
     transforms.Normalize()])
eval_transforms = transforms.Compose([
    transforms.ResizeByShort(short_size=256),
    transforms.CenterCrop(crop_size=224),
    transforms.Normalize()
])

# Define the datasets used for training and validation
train_dataset = pdx.datasets.ImageNet(
    data_dir='vegetables_cls',
    file_list='vegetables_cls/train_list.txt',
    label_list='vegetables_cls/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.ImageNet(data_dir='vegetables_cls',
                                     file_list='vegetables_cls/val_list.txt',
Example #10
from paddlex.cls import transforms
import paddlex
import cv2

base = './data'

train_transforms = transforms.Compose([
    transforms.ResizeByShort(256),
    transforms.RandomCrop(crop_size=224),
    transforms.RandomRotate(rotate_range=30, prob=0.5),
    transforms.RandomDistort(),
    transforms.Normalize()
])

train_dataset = paddlex.datasets.ImageNet(data_dir=base,
                                          file_list='train_list.txt',
                                          label_list='labels.txt',
                                          transforms=train_transforms,
                                          shuffle=True)

model = paddlex.load_model('weights/final')
im = cv2.imread('test.jpg')

print(model.evaluate(eval_dataset=train_dataset))
Example #11
import paddlex as pdx  # assumed import; `pdx` is used further below but not imported in this snippet
import paddle.fluid as fluid
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

# Define the training/validation image preprocessing transforms
from paddlex.cls import transforms

train_transforms = transforms.Compose([
    transforms.RandomCrop(crop_size=224),
    transforms.RandomHorizontalFlip(),
    transforms.RandomDistort(brightness_range=0.9,
                             brightness_prob=0.5,
                             contrast_range=0.9,
                             contrast_prob=0.5,
                             saturation_range=0.9,
                             saturation_prob=0.5,
                             hue_range=18,
                             hue_prob=0.5),
    transforms.Normalize()
])

eval_transforms = transforms.Compose([
    transforms.ResizeByShort(short_size=256),
    transforms.CenterCrop(crop_size=224),
    transforms.Normalize()
])

# Define the dataset that loads the image classification data
train_dataset = pdx.datasets.ImageNet(data_dir='garbage_data',
Example #12
    x = np.asarray(x).astype('float32')
    #x = cv2.imread("work/"+path,cv2.IMREAD_COLOR)
    x = cv2.cvtColor(x, cv2.COLOR_RGB2BGR) / 255.0
    x = (cv2.resize(x, (1024, 1024)) - 0.5) / 0.5
    return x


transform_ops = transforms.Compose([
    #transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])   # this will do 1/255.0
    transforms.RandomHorizontalFlip(prob=0.5),
    transforms.RandomRotate(rotate_range=30, prob=0.5),
    transforms.RandomCrop(crop_size=224,
                          lower_scale=0.7,
                          lower_ratio=3. / 4,
                          upper_ratio=4. / 3),
    transforms.RandomDistort(brightness_range=0.1,
                             brightness_prob=0.5,
                             contrast_range=0.1,
                             contrast_prob=0.5,
                             saturation_range=0.1,
                             saturation_prob=0.0,
                             hue_range=0.1,
                             hue_prob=0.0)
])


def transform(img):
    #print("before transform: ",img.shape)
    img = transform_ops(img)[0]
    #print("after transform: ",img.shape)
    return img
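A minimal usage sketch for transform() above: read one image with OpenCV and run it through the augmentation pipeline. The image path is a placeholder, not taken from the source.

import cv2

img = cv2.imread('sample.jpg')  # placeholder path
out = transform(img)            # augmented image array returned by the Compose pipeline
print(out.shape)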