def train():
    from paddlex.cls import transforms
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])
    train_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/mini_imagenet_veg',
        file_list=f'{data_dir}/mini_imagenet_veg/train_list.txt',
        label_list=f'{data_dir}/mini_imagenet_veg/labels.txt',
        transforms=train_transforms)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/mini_imagenet_veg',
        file_list=f'{data_dir}/mini_imagenet_veg/val_list.txt',
        label_list=f'{data_dir}/mini_imagenet_veg/labels.txt',
        transforms=eval_transforms)
    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV3_large_ssld(num_classes=num_classes)
    model.train(num_epochs=12,
                train_dataset=train_dataset,
                train_batch_size=32,
                eval_dataset=eval_dataset,
                lr_decay_epochs=[6, 8],
                save_interval_epochs=1,
                learning_rate=0.00625,
                save_dir=f'{output_dir}/output/mobilenetv3_large_ssld',
                use_vdl=True)
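The helper above references `pdx`, `data_dir`, and `output_dir` from module scope without defining them. A minimal setup sketch; the paths below are placeholders assumed for illustration, not values from the original snippet:

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # select GPU 0 before importing paddle

import paddlex as pdx

data_dir = './data'    # hypothetical: parent directory of mini_imagenet_veg
output_dir = './work'  # hypothetical: root directory for training outputs

train()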
def train():
    from paddlex.cls import transforms
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])
    train_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/vegetables_cls',
        file_list=f'{data_dir}/vegetables_cls/train_list.txt',
        label_list=f'{data_dir}/vegetables_cls/labels.txt',
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir=f'{data_dir}/vegetables_cls',
        file_list=f'{data_dir}/vegetables_cls/val_list.txt',
        label_list=f'{data_dir}/vegetables_cls/labels.txt',
        transforms=eval_transforms)
    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV2(num_classes=num_classes)
    model.train(num_epochs=10,
                train_dataset=train_dataset,
                train_batch_size=32,
                eval_dataset=eval_dataset,
                lr_decay_epochs=[4, 6, 8],
                save_interval_epochs=1,
                learning_rate=0.025,
                save_dir=f'{output_dir}/mobilenetv2',
                use_vdl=True)
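Once this run finishes, the best checkpoint can be loaded for inference. A sketch assuming the PaddleX 1.x `pdx.load_model` / `predict` API and a placeholder image path:

import paddlex as pdx

model = pdx.load_model(f'{output_dir}/mobilenetv2/best_model')
result = model.predict('test.jpg')  # 'test.jpg' is a placeholder image path
print(result)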
import os

import paddlex as pdx
from paddlex.cls import transforms


def train(model_dir=None, sensitivities_file=None, eval_metric_loss=0.05):
    # Download and extract the vegetable classification dataset
    veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
    pdx.utils.download_and_decompress(veg_dataset, path='./')

    # Define transforms for training and evaluation
    train_transforms = transforms.Compose([
        transforms.RandomCrop(crop_size=224),
        transforms.RandomHorizontalFlip(),
        transforms.Normalize()
    ])
    eval_transforms = transforms.Compose([
        transforms.ResizeByShort(short_size=256),
        transforms.CenterCrop(crop_size=224),
        transforms.Normalize()
    ])

    # Define the datasets used for training and evaluation
    train_dataset = pdx.datasets.ImageNet(
        data_dir='vegetables_cls',
        file_list='vegetables_cls/train_list.txt',
        label_list='vegetables_cls/labels.txt',
        transforms=train_transforms,
        shuffle=True)
    eval_dataset = pdx.datasets.ImageNet(
        data_dir='vegetables_cls',
        file_list='vegetables_cls/val_list.txt',
        label_list='vegetables_cls/labels.txt',
        transforms=eval_transforms)

    num_classes = len(train_dataset.labels)
    model = pdx.cls.MobileNetV2(num_classes=num_classes)

    if model_dir is None:
        # Use weights pretrained on the ImageNet dataset
        pretrain_weights = "IMAGENET"
    else:
        # Use the given model_dir as the pretrained weights
        assert os.path.isdir(model_dir), "Path {} is not a directory".format(
            model_dir)
        pretrain_weights = model_dir

    save_dir = './output/mobilenetv2'
    if sensitivities_file is not None:
        # "DEFAULT" means the model's built-in parameter sensitivity
        # information is used as the pruning criterion
        if sensitivities_file != "DEFAULT":
            assert os.path.exists(
                sensitivities_file), "Path {} does not exist".format(
                    sensitivities_file)
        save_dir = './output/mobilenetv2_prune'

    model.train(
        num_epochs=10,
        train_dataset=train_dataset,
        train_batch_size=32,
        eval_dataset=eval_dataset,
        lr_decay_epochs=[4, 6, 8],
        learning_rate=0.025,
        pretrain_weights=pretrain_weights,
        save_dir=save_dir,
        use_vdl=True,
        sensitivities_file=sensitivities_file,
        eval_metric_loss=eval_metric_loss)
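A usage sketch for this helper: train a baseline first, then retrain with pruning; the second call assumes the baseline run has already produced `./output/mobilenetv2/best_model`.

train()  # baseline run, checkpoints under ./output/mobilenetv2
train(model_dir='./output/mobilenetv2/best_model',
      sensitivities_file='DEFAULT',
      eval_metric_loss=0.05)  # pruned retrain, saved under ./output/mobilenetv2_prune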
# Environment variable configuration that controls whether the GPU is used
# Documentation: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from paddlex.cls import transforms
import paddlex as pdx

# Download and extract the vegetable classification dataset
# veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
# pdx.utils.download_and_decompress(veg_dataset, path='./')

# Define transforms for training and evaluation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/cls_transforms.html
train_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotate(),
    transforms.RandomDistort(),
    transforms.Normalize()
])
# eval_transforms = transforms.Compose([
#     transforms.Normalize()
# ])

# Define the datasets used for training and evaluation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-imagenet
train_dataset = pdx.datasets.ImageNet(
    data_dir='/home/aistudio/data/data67498/train',
    file_list='/home/aistudio/data/data67498/train/train_list.txt',
    label_list='/home/aistudio/data/data67498/train/labels.txt',
    transforms=train_transforms,
    x = (cv2.resize(x, (1024, 1024)) - 0.5) / 0.5
    return x


def loader_test(path):
    x = Image.open(path).convert("RGB")
    x = np.asarray(x).astype('float32')
    # x = cv2.imread("work/" + path, cv2.IMREAD_COLOR)
    x = cv2.cvtColor(x, cv2.COLOR_RGB2BGR) / 255.0
    x = (cv2.resize(x, (1024, 1024)) - 0.5) / 0.5
    return x


transform_ops = transforms.Compose([
    # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # this will do 1/255.0
    transforms.RandomHorizontalFlip(prob=0.5),
    transforms.RandomRotate(rotate_range=30, prob=0.5),
    transforms.RandomCrop(crop_size=224,
                          lower_scale=0.7,
                          lower_ratio=3. / 4,
                          upper_ratio=4. / 3),
    transforms.RandomDistort(brightness_range=0.1,
                             brightness_prob=0.5,
                             contrast_range=0.1,
                             contrast_prob=0.5,
                             saturation_range=0.1,
                             saturation_prob=0.0,
                             hue_range=0.1,
                             hue_prob=0.0)
])
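transform_ops is intended to be handed to a PaddleX dataset rather than applied by hand. A sketch with placeholder dataset paths (the directory and list files below are assumptions, not from the original snippet):

import paddlex as pdx

train_dataset = pdx.datasets.ImageNet(
    data_dir='dataset',                    # placeholder
    file_list='dataset/train_list.txt',    # placeholder
    label_list='dataset/labels.txt',       # placeholder
    transforms=transform_ops,
    shuffle=True)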