def build_transform():
    """Build the CIFAR train/test preprocessing pipelines.

    Returns:
        tuple: (train_transforms, test_transforms) -- the train pipeline
        applies crop/color/flip/rotation augmentation before normalization;
        the test pipeline only converts to array and normalizes.
    """
    mean = [0.5071, 0.4865, 0.4409]
    std = [0.1942, 0.1918, 0.1958]
    augment_ops = [
        RandomCrop(32, padding=4),
        ContrastTransform(0.1),
        BrightnessTransform(0.1),
        RandomHorizontalFlip(),
        RandomRotation(15),
        ToArray(),
        Normalize(mean, std),
    ]
    train_transforms = Compose(augment_ops)
    test_transforms = Compose([ToArray(), Normalize(mean, std)])
    return train_transforms, test_transforms
Beispiel #2
0
    def test_with_dataloader(self):
        """Check the custom relu op against paddle.nn.functional.relu on
        real MNIST batches, for every configured device."""
        for device in self.devices:
            paddle.set_device(device)
            # Pixel values scaled from [0, 255] to roughly [-1, 1].
            normalize = Normalize(mean=[127.5], std=[127.5], data_format='CHW')
            mnist = paddle.vision.datasets.MNIST(
                mode='train', transform=Compose([normalize]))
            loader = paddle.io.DataLoader(
                mnist,
                batch_size=64,
                shuffle=True,
                drop_last=True,
                num_workers=0)

            # Only a handful of batches are needed for the comparison.
            for step, (image, _) in enumerate(loader()):
                out = self.custom_ops[0](image)
                pd_out = paddle.nn.functional.relu(image)
                self.assertTrue(
                    np.array_equal(out, pd_out),
                    "custom op out: {},\n paddle api out: {}".format(
                        out, pd_out))
                if step == 5:
                    break
Beispiel #3
0
def compute_features(model, use_flip, batch_size, workers, data_path):
    """Run *model* over the whole evaluation dataset and collect embeddings.

    Args:
        model: network whose forward returns one embedding per image.
        use_flip: forwarded to the model (presumably enables flip-TTA --
            TODO confirm against the model definition).
        batch_size (int): loader batch size.
        workers (int): number of DataLoader worker processes.
        data_path (str): path whose dirname/basename locate the dataset.

    Returns:
        tuple: (embeddings tensor, list of targets).
    """
    eval_transform = transforms.Compose([
        ToArray(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], data_format='CHW'),
    ])
    dataset = EvalDataset(
        os.path.dirname(data_path),
        os.path.basename(data_path),
        eval_transform)
    loader = paddle.io.DataLoader(
        dataset,
        batch_size=batch_size, shuffle=False, drop_last=False,
        num_workers=workers)
    batch_time = AverageMeter('Time', ':6.3f')
    progress = ProgressMeter(len(loader), [batch_time])
    outputs, targets = [], []
    tic = time.time()
    for step, (images, target) in enumerate(loader):
        targets.extend(target)
        # Inference-mode forward pass (no momentum-encoder input).
        feats = model(images, im_k=None, use_flip=use_flip, is_train=False)
        outputs.append(feats)
        # Track per-batch wall time and report every 10 batches.
        batch_time.update(time.time() - tic)
        tic = time.time()
        if step % 10 == 0:
            progress.display(step)
    return paddle.concat(outputs), targets
Beispiel #4
0
def build_aug(args):
    """build augmentation transforms
    Args:
        args: data augmentation config (currently unused -- kept for caller
            compatibility; NOTE(review): confirm whether it should drive
            the pipeline)
    Return: transforms
    """
    pipeline = [
        RandomHorizontalFlip(),
        ToArray(),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5],
                  data_format='CHW'),
    ]
    return Compose(pipeline)
 def predict_class(self, im, key):
     """Predict the grade class of a small-joint image crop.

     Args:
         im: single-channel image array (H, W) -- a channel axis is added
             before preprocessing.
         key: selects which joint model to use from ``self.arthrosis``.

     Returns:
         int: index of the highest-scoring class.
     """
     from paddle.vision.transforms import Compose, Resize, Normalize, Transpose
     pipeline = Compose([
         Resize(size=(224, 224)),
         Normalize(mean=[0.5, 0.5, 0.5],
                   std=[0.5, 0.5, 0.5],
                   data_format='HWC'),
         Transpose()
     ])
     model = self.arthrosis[key][2]
     model.eval()
     # Add a channel axis: (H, W) -> (H, W, 1).
     sample = np.expand_dims(im, 2)
     sample = pipeline(sample)
     # Add a leading batch axis for inference.
     sample = np.expand_dims(sample, 0)
     tensor = paddle.to_tensor(sample, dtype='float32')
     # Forward pass; take the first (only) sample's scores.
     scores = model(tensor)[0]
     return np.argmax(scores.numpy())
Beispiel #6
0
    def __init__(self, cfg, train=True):
        """Set up directories, the category->label mapping, and the
        train/val transform pipelines.

        Args:
            cfg: config with ``Data`` (dataset name, root dir) and ``Trans``
                (crop/scale/augmentation settings) sections.
            train (bool): whether this instance serves the training split.
        """
        super(ImageNet, self).__init__()
        self.cfg = cfg
        self.train = train

        self.data_infor = get_data(cfg.Data.dataset)
        self.traindir = os.path.join(cfg.Data.Dir, 'train')
        self.valdir = os.path.join(cfg.Data.Dir, 'val')
        # Map the first 1000 category directory names (alphabetical) to
        # label ids 0..999.
        # BUGFIX: sort the full listing *before* slicing -- os.listdir()
        # returns entries in arbitrary order, so the original
        # ``sorted(os.listdir(...)[:1000])`` could select a
        # non-deterministic subset of categories.
        self.catedict = dict(
            zip(sorted(os.listdir(self.valdir))[:1000], range(1000)))

        # transform
        # assumed input is CHW
        self.normalize = Normalize(mean=self.data_infor.mean,
                                   std=self.data_infor.std,
                                   data_format='CHW')
        self.transform_train = [
            RandomResizedCrop(cfg.Trans.crop_size,
                              scale=(cfg.Trans.min_area_ratio, 1.0),
                              ratio=(3. / 4, cfg.Trans.aspect_ratio))
        ]

        # Optional PCA lighting + color jitter, only when eigen data exists
        # and random color augmentation is enabled in the config.
        if self.data_infor.eigval is not None and self.data_infor.eigvec is not None \
                and cfg.Trans.random_color:
            lighting = Lighting(0.1, self.data_infor.eigval,
                                self.data_infor.eigvec)
            jitter = ColorJitter(0.4, 0.4, 0.4)
            self.transform_train.extend([jitter, lighting])
        self.transform_train.extend(
            [RandomHorizontalFlip(),
             ToTensor(), self.normalize])
        self.transform_train = Compose(self.transform_train)
        # Validation: deterministic resize + center crop, no augmentation.
        self.transform_val = Compose([
            Resize(cfg.Trans.scale_size),
            CenterCrop(cfg.Trans.crop_size),
            ToTensor(), self.normalize
        ])

        self.file_list = self.get_samples()
            learning_rate=LinearWarmup(
                CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
            momentum=MOMENTUM,
            parameters=model.parameters(),
            weight_decay=WEIGHT_DECAY),
        paddle.nn.CrossEntropyLoss(),
        paddle.metric.Accuracy(topk=(1, 5)))

    # Training augmentation: random crop/flip/rotation plus randomly applied
    # brightness and contrast jitter, then array conversion + normalization.
    transforms = Compose([
        RandomCrop(32, padding=4),
        RandomApply(BrightnessTransform(0.1)),
        RandomApply(ContrastTransform(0.1)),
        RandomHorizontalFlip(),
        RandomRotation(15),
        ToArray(),
        Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    # Validation: no augmentation, only array conversion + normalization.
    val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
    train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
    test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
    # NOTE: this rebinds the name of the imported `callbacks` module to a
    # list; the RHS is evaluated first, so callbacks.VisualDL still resolves.
    callbacks = [LRSchedulerM(), callbacks.VisualDL(
        'vis_logs/res20_3x3_lr0.1cos_e300_bs128_bri_con_aug')]
    # NOTE(review): this call continues beyond the excerpt.
    model.fit(
        train_set,
        test_set,
        epochs=MAX_EPOCH,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=4,
        verbose=1,
        callbacks=callbacks,
# Static-graph training setup: LeNet on MNIST with Adam, compiled for
# data-parallel GPU execution.
paddle.enable_static()
paddle.set_device("gpu")

# model
image = static.data(shape=[None, 1, 28, 28], name='image', dtype='float32')
label = static.data(shape=[None, 1], name='label', dtype='int64')

net = LeNet()
out = net(image)
loss = nn.functional.cross_entropy(out, label)

opt = paddle.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)

# data loader
# Scale pixel values from [0, 255] to roughly [-1, 1].
transform = Compose([Normalize(mean=[127.5], std=[127.5], data_format='CHW')])
train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)
train_loader = paddle.io.DataLoader(train_dataset,
                                    feed_list=[image, label],
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True,
                                    num_workers=2)

# prepare
exe = static.Executor()
exe.run(static.default_startup_program())

places = paddle.static.cuda_places()
# NOTE(review): this statement continues beyond the excerpt.
compiled_program = static.CompiledProgram(
    static.default_main_program()).with_data_parallel(loss_name=loss.name,
Beispiel #9
0
def inference(model, left_imgs, right_ims, LOG):
    """Run multi-stage stereo disparity inference over paired image paths.

    Args:
        model: stereo network returning one disparity map per stage.
        left_imgs: list of left-image file paths.
        right_ims: list of right-image file paths, parallel to *left_imgs*.
        LOG: logger for progress/save messages.

    Side effects: writes colorized disparity PNGs (per stage next to
    ``args.left_img``, or the final stage into ``args.save_path``) and
    optionally shows a visualization window.
    """
    stages = 4
    model.eval()

    transform = Compose([ToTensor(),
                         Normalize(mean=imagenet_stats["mean"],
                                   std=imagenet_stats["std"])])

    # Idiom fix: iterate the parallel path lists with zip instead of
    # indexing via range(len(...)).
    for left_path, right_path in zip(left_imgs, right_ims):
        left_img = cv2.imread(left_path, cv2.IMREAD_UNCHANGED)
        right_img = cv2.imread(right_path, cv2.IMREAD_UNCHANGED)

        h, w, c = left_img.shape
        th, tw = 368, 1232

        # Skip images smaller than the fixed crop size.
        if h < th or w < tw:
            continue

        # Bottom-right crop to exactly (th, tw).
        left_img = left_img[h - th:h, w - tw:w, :]
        right_img = right_img[h - th:h, w - tw:w, :]

        # BGR -> RGB before normalization; add a batch axis.
        left_input = transform(left_img[:, :, ::-1]).unsqueeze(axis=0)
        right_input = transform(right_img[:, :, ::-1]).unsqueeze(axis=0)

        with paddle.no_grad():

            start_time = time.time()
            outputs = model(left_input, right_input)
            cost_time = time.time() - start_time

            ss = "Inference 4 stages cost = {:.3f} sec, FPS = {:.1f}".format(cost_time, 1/cost_time)

            for stage in range(stages):
                outputs[stage] = outputs[stage].squeeze(axis=[0, 1]).numpy().astype(np.uint8)
                # After this loop, color_disp holds the *last* stage's map;
                # the vis/save branches below rely on that.
                color_disp = cv2.applyColorMap(cv2.convertScaleAbs(outputs[stage], alpha=1, beta=0), cv2.COLORMAP_JET)

                if args.left_img:
                    # Idiom fix: os.path.dirname replaces manual "/" split
                    # (also handles root-level paths like "/img.png" correctly).
                    temp_path = os.path.dirname(args.left_img)
                    save_img_path = os.path.join(temp_path, str(stage + 1) + ".png")
                    cv2.imwrite(save_img_path, color_disp)
                    LOG.info("{}\t\tSave img = {}".format(ss, save_img_path))

            if args.vis:
                concat_img = np.concatenate((left_img, color_disp), axis=0)
                cv2.imshow("concat_img", concat_img)
                key = cv2.waitKey(0)
                if key == ord("q"):
                    break

            if not args.left_img:
                img_name = os.path.basename(left_path)
                save_img_path = os.path.join(args.save_path, img_name)
                cv2.imwrite(save_img_path, color_disp)
                LOG.info("{}\t\tSave img = {}".format(ss, save_img_path))
Beispiel #10
0
import paddle
from paddle import nn
from paddle import optimizer
from paddle.vision.transforms import Compose, Normalize
from paddle.vision.transforms import ToTensor

import paddle.distributed as dist

from model_zoo import Model

# Shared preprocessing: convert to tensor, then normalize.
# NOTE(review): Normalize() is called with library defaults -- confirm the
# intended mean/std against the dataset.
transform_tuple = Compose([ToTensor(), Normalize()])

# Toggle distributed (data-parallel) training.
parallel_flag = False

if __name__ == '__main__':

    if parallel_flag:
        dist.init_parallel_env()

    # LeakyReLU activation + cross-entropy loss + Adam optimizer.
    leakRelu_crossEntropy_adam = Model(transform_tuple, nn.LeakyReLU,
                                       nn.CrossEntropyLoss, optimizer.Adam)

    leakRelu_crossEntropy_adam.train()
    leakRelu_crossEntropy_adam.validate()

    # ReLU activation + cross-entropy loss + SGD optimizer.
    relu_crossEntropy_sgd = Model(transform_tuple, nn.ReLU,
                                  nn.CrossEntropyLoss, optimizer.SGD)
    relu_crossEntropy_sgd.train()
    relu_crossEntropy_sgd.validate()

    # NOTE(review): this call continues beyond the excerpt.
    leakReLuCrossEntropySgd = Model(transform_tuple, nn.LeakyReLU,
Beispiel #11
0
        self.linear3 = paddle.nn.Linear(in_features=100, out_features=10)

    def forward(self, x):
        """Flatten the input and run it through the 3-layer MLP head."""
        out = self.flatten(x)
        # Apply the linear/activation stack in order; the final linear
        # layer produces the 10-way logits.
        for layer in (self.linear1, self.act1,
                      self.linear2, self.act2,
                      self.linear3):
            out = layer(out)
        return out


if __name__ == '__main__':
    print("load file")
    # Scale pixel values from [0, 255] to roughly [-1, 1].
    transform = Normalize(mean=[127.5], std=[127.5], data_format='CHW')

    # Normalize the datasets with the transform.
    print('download training data and load training data')
    train_dataset = paddle.vision.datasets.MNIST(mode='train',
                                                 transform=transform)
    test_dataset = paddle.vision.datasets.MNIST(mode='test',
                                                transform=transform)
    print('load finished')

    print("show file")
    # Display the first training sample and print its label.
    train_data0, train_label_0 = train_dataset[0][0], train_dataset[0][1]
    train_data0 = train_data0.reshape([28, 28])
    plt.figure(figsize=(2, 2))
    plt.imshow(train_data0, cmap=plt.cm.binary)
    print('train_data0 label is: ' + str(train_label_0))
Beispiel #12
0
class Discriminator(nn.Layer):
    """DCGAN-style discriminator for 32x32 single-channel images.

    Downsamples through three strided convolutions and outputs a sigmoid
    probability that the input image is real.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # BUGFIX: the original used ``==`` (a comparison) instead of ``=``,
        # so ``self.dis`` was never assigned and forward() would raise
        # AttributeError. Also the base class must be ``nn.Layer`` --
        # ``nn.layer`` is the module, not the class.
        self.dis = nn.Sequential(
            nn.Conv2D(1, 64, 4, 2, 1, bias_attr=False), nn.LeakyReLU(0.2),
            nn.Conv2D(64, 64 * 2, 4, 2, 1, bias_attr=False),
            nn.BatchNorm2D(64 * 2), nn.LeakyReLU(0.2),
            nn.Conv2D(64 * 2, 64 * 4, 4, 2, 1, bias_attr=False),
            nn.BatchNorm2D(64 * 4), nn.LeakyReLU(0.2),
            nn.Conv2D(64 * 4, 1, 4, 1, 0, bias_attr=False), nn.Sigmoid())

    def forward(self, x):
        """Return real/fake scores for a batch of images."""
        return self.dis(x)


if __name__ == '__main__':
    # MNIST resized to 32x32 and scaled to roughly [-1, 1] for the GAN.
    train_dataset = paddle.vision.datasets.MNIST(mode='train',
                                                 transform=Compose([
                                                     Resize(size=(32, 32)),
                                                     Normalize(mean=[127.5],
                                                               std=[127.5])
                                                 ]))
    dataloader = paddle.io.DataLoader(dataset=train_dataset,
                                      batch_size=32,
                                      shuffle=True,
                                      num_workers=4)

    # Smoke check: pull one batch and print its image-tensor shape.
    for data in dataloader:
        break
    print(data[0].shape)
Beispiel #13
0
# Pretrained CLIP weight download locations.
URL = {
    "RN50": "https://bj.bcebos.com/paddleaudio/examples/clip/RN50.pdparams",
    "RN101": "https://bj.bcebos.com/paddleaudio/examples/clip/RN101.pdparams",
    # NOTE(review): extension is ".pdparam" (no trailing "s"), unlike the
    # other entries -- confirm the URL is correct.
    "VIT": "https://bj.bcebos.com/paddleaudio/examples/clip/ViT-B-32.pdparam",
}

# CLIP image normalization statistics (per-channel mean and std).
MEAN, STD = (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258,
                                                  0.27577711)
_tokenizer = SimpleTokenizer()

# Image preprocessing: resize/center-crop to 224, force RGB, convert to a
# normalized tensor, and add a leading batch axis in place.
transform = Compose([
    Resize(224, interpolation="bicubic"),
    CenterCrop(224),
    lambda image: image.convert("RGB"),
    ToTensor(),
    Normalize(mean=MEAN, std=STD),
    lambda t: t.unsqueeze_(0),
])


def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """
    Returns the tokenized representation of given input string(s)
    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    Returns
    -------
Beispiel #14
0
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset
from paddle.vision.transforms import Compose, Normalize
from paddle.vision.models import LeNet
import math
import numpy as np
import random
import os
from PIL import Image

# Scale HWC uint8 pixel values from [0, 255] to roughly [-1, 1].
normalize = Normalize(mean=[127.5], std=[127.5], data_format='HWC')


#创建数据集读取
class RandomDataset(Dataset):
    def __init__(self, root="data", mode="train"):
        """Select the annotation file for *mode* and load sample records.

        Args:
            root (str): directory containing train.txt / test.txt.
            mode (str): "train" selects train.txt, anything else test.txt.
        """
        self.mode = mode
        txt_name = "train.txt" if mode == "train" else "test.txt"
        self.txt_file = os.path.join(root, txt_name)
        self.records_list = []
        self.parse_dataset()

    def __getitem__(self, idx):
        path = self.records_list[idx][0]
        img = Image.open(path)
        img = img.resize((28, 28), Image.BILINEAR)
        img = normalize(img)