def __init__(self, mode='train'):
    """Build the sample index and the per-split transform pipeline.

    Reads ``signs/{mode}.txt`` where each line is expected to be
    ``<image-path>\t<label>``; lines without both fields are skipped.

    Args:
        mode (str): one of ``'train'``, ``'test'`` or ``'valid'``;
            selects both the index file and the augmentation strategy.

    Raises:
        ValueError: if ``mode`` is not a known split.
    """
    # ``assert`` is stripped under ``python -O``; validate explicitly.
    if mode not in ('train', 'test', 'valid'):
        raise ValueError('mode is one of train, test, valid.')

    self.data = []

    with open('signs/{}.txt'.format(mode)) as f:
        for line in f:
            info = line.strip().split('\t')

            # BUG FIX: the original tested ``len(info) > 0`` and then
            # indexed ``info[1]``, so any line without a tab raised
            # IndexError. Require both fields to be present.
            if len(info) > 1:
                self.data.append([info[0].strip(), info[1].strip()])

    if mode == 'train':
        self.transforms = T.Compose([
            T.RandomResizedCrop(IMAGE_SIZE),   # random crop, resized to model input
            T.RandomHorizontalFlip(0.5),       # random horizontal flip
            T.ToTensor(),                      # convert + scale, HWC => CHW
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225])  # per-channel normalization
        ])
    else:
        self.transforms = T.Compose([
            T.Resize(256),                     # resize before cropping
            T.RandomCrop(IMAGE_SIZE),          # crop to model input size
            T.ToTensor(),                      # convert + scale, HWC => CHW
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225])  # per-channel normalization
        ])
# --- Example #2 ---
 def get_dataloader(self, num_workers):
     """Return a shuffled DataLoader over the MNIST test split.

     Args:
         num_workers (int): worker processes for data loading.
     """
     # Same pipeline as the original: crop, random resized crop,
     # normalize, then tensor conversion.
     pipeline = transforms.Compose([
         transforms.CenterCrop(20),
         transforms.RandomResizedCrop(14),
         transforms.Normalize(),
         transforms.ToTensor()
     ])
     test_set = paddle.vision.datasets.MNIST(mode='test',
                                             transform=pipeline)
     return paddle.io.DataLoader(test_set,
                                 batch_size=32,
                                 num_workers=num_workers,
                                 shuffle=True)
# --- Example #3 ---
from paddle.metric import Accuracy
from paddle.vision.models import mobilenet_v2


import warnings
warnings.filterwarnings("ignore")


# BUG FIX: ``paddle`` itself was never imported, but it is used below
# (``paddle.io.DataLoader``, ``paddle.CPUPlace``). Note that
# ``import paddle.vision.transforms as T`` binds only ``T``, not ``paddle``.
import paddle

from dataset import MyDataset
import paddle.vision.transforms as T


# Training-time augmentation pipeline.
transform = T.Compose([
    T.RandomResizedCrop([448, 448]),
    T.RandomHorizontalFlip(),
    T.RandomRotation(90),
    T.ToTensor(),
    T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

train_dataset = MyDataset(txt=r'C:\Users\11982\Desktop\paddle\train.txt', transform=transform)

train_loader = paddle.io.DataLoader(train_dataset, places=paddle.CPUPlace(), batch_size=8, shuffle=True, num_workers=8)

# build model
model = mobilenet_v2(pretrained=True, scale=1.0, num_classes=2, with_pool=True)

# Custom Callback that records the loss during training
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:gentelyang  time:2021-06-10
import os
import paddle
import paddle.distributed as dist
from paddle.io import DataLoader
from paddle.vision import transforms
# Per-channel normalization constants. These match the standard ImageNet
# statistics scaled to the 0-255 pixel range (0.485*255 ≈ 123.675, etc.)
# — presumably intended for uint8 HWC input; confirm against the dataset.
normalize = transforms.Normalize(mean=[123.675, 116.28, 103.53],
                                 std=[58.395, 57.120, 57.375])
# Augment, then Transpose (presumably HWC -> CHW — confirm against the
# paddle transforms docs) before normalizing.
transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.Transpose(), normalize
])


def train():
    """Distributed training setup for MobileNetV2 on CIFAR-10.

    NOTE(review): this snippet appears truncated by the source scrape —
    it stops immediately after the "set up the optimizer" comment, so no
    optimizer or training loop is visible here.
    """
    # Initialize the multi-card (data-parallel) environment.
    dist.init_parallel_env()
    train_dataset = paddle.vision.datasets.Cifar10(mode='train',
                                                   transform=transform)
    # Shards and shuffles batches across the parallel workers.
    batch_sampler = paddle.io.DistributedBatchSampler(train_dataset,
                                                      batch_size=32,
                                                      shuffle=True)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_sampler=batch_sampler)
    model = paddle.vision.mobilenet_v2(num_classes=10)
    # Wrap the model for multi-card training.
    model = paddle.DataParallel(model)
    # Set up the optimizer.
# --- Example #5 ---
        p.requires_grad = val


# augmentation utils

# Per-channel ImageNet statistics scaled to the 0-255 pixel range.
# BUG FIX: the original statement ended with a stray trailing comma,
# which made ``normalize`` a 1-tuple containing the transform rather
# than the transform itself — the pipeline would then fail when applied.
normalize = T.Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                        std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
                        data_format='CHW')
# BYOL-style augmentation pipeline: probabilistic color jitter,
# grayscale, blur, then random resized crop and normalization.
augmentation = [
    byol.transforms.RandomApply([
        T.ColorJitter(0.8, 0.8, 0.8, 0.2),
    ], p=0.3),
    byol.transforms.RandomGrayscale(p=0.2),
    byol.transforms.RandomApply([byol.transforms.GaussianBlur((1.0, 2.0))],
                                p=0.2),
    T.RandomResizedCrop(224, scale=(0.2, 1.)),
    # T.RandomResizedCrop((image_size, image_size)),
    normalize
]

# MLP class for projector and predictor


class MLP(nn.Layer):
    def __init__(self, dim, projection_size, hidden_size=4096):
        """Two-layer MLP head: Linear -> BatchNorm1D -> ReLU -> Linear."""
        super(MLP, self).__init__()
        layers = [
            nn.Linear(dim, hidden_size),
            nn.BatchNorm1D(hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, projection_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):