コード例 #1
0
    def __init__(self, mode='train'):
        """
        Initialize the dataset: read 'signs/<mode>.txt' (tab-separated
        "path<TAB>label" lines) and build the transform pipeline.

        Args:
            mode (str): one of 'train', 'test', 'valid'; selects both the
                annotation file and the augmentation pipeline.
        """
        assert mode in ['train', 'test',
                        'valid'], 'mode is one of train, test, valid.'

        self.data = []

        with open('signs/{}.txt'.format(mode)) as f:
            for line in f.readlines():
                info = line.strip().split('\t')

                # BUGFIX: str.split() always returns at least one element,
                # so the previous `len(info) > 0` guard was always true and
                # `info[1]` raised IndexError on lines without a tab.
                # Require both the path and the label field.
                if len(info) > 1:
                    self.data.append([info[0].strip(), info[1].strip()])

        if mode == 'train':
            # Training: random crop + horizontal flip for augmentation.
            self.transforms = T.Compose([
                T.RandomResizedCrop(IMAGE_SIZE),  # random resized crop
                T.RandomHorizontalFlip(0.5),  # random horizontal flip
                T.ToTensor(),  # format conversion/scaling, HWC => CHW
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])  # ImageNet stats
            ])
        else:
            # Evaluation: resize then crop to the network input size.
            self.transforms = T.Compose([
                T.Resize(256),  # resize
                T.RandomCrop(IMAGE_SIZE),  # random crop
                T.ToTensor(),  # format conversion/scaling, HWC => CHW
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])  # ImageNet stats
            ])
コード例 #2
0
 def __init__(self, mode="train"):
     """Build a class-balanced positive/negative file list plus transform.

     Args:
         mode (str): "train" takes the first 80% of each class; any other
             value takes the remaining 20%.
     """
     super(HumanClasDataset, self).__init__()
     # NOTE(review): `data_dir` is a global not defined in this snippet —
     # confirm it is assigned before this dataset is constructed.
     self.data_path = data_dir
     ps = os.listdir(osp.join(self.data_path, "p"))  # positive samples
     ns = os.listdir(osp.join(self.data_path, "n"))  # negative samples
     # Sort for a deterministic ordering before splitting.
     ps.sort()
     ns.sort()
     ps = [osp.join("p", x) for x in ps]
     ns = [osp.join("n", x) for x in ns]
     # Downsample negatives so both classes have equal counts (raises
     # ValueError if there are fewer negatives than positives).
     ns = random.sample(ns, len(ps))
     data = []
     if mode == "train":
         # First 80% of each class; label 1 = positive, 0 = negative.
         for idx in range(int(len(ps) * 0.8)):
             data.append([ps[idx], 1])
         for idx in range(int(len(ns) * 0.8)):
             data.append([ns[idx], 0])
     else:
         # Remaining 20% for validation/testing.
         for idx in range(int(len(ps) * 0.8), len(ps)):
             data.append([ps[idx], 1])
         for idx in range(int(len(ns) * 0.8), len(ns)):
             data.append([ns[idx], 0])
     self.data = data
     self.transform = vt.Compose([
         vt.ColorJitter(0.1, 0.1, 0.1, 0.1),
         # # vt.RandomRotation(10),
         vt.RandomHorizontalFlip(),
         vt.Resize(64),
         vt.ToTensor(),
     ])  # TODO: investigate a suitable data-augmentation strategy
コード例 #3
0
def main(_):
    """Train the model, or (mode 'eval_rollout') load rollout metadata."""
    transform = T.Compose([T.ToTensor(), T.Normalize(mean=0.5, std=0.5)])
    # NOTE(review): these lists are empty here — presumably they are meant
    # to be populated before building the dataset; confirm.
    train_img_path = []
    train_label = []
    train_dataset = MyDataset(image=train_img_path,
                              lable=train_label,
                              transform=transform)
    train_loader = paddle.io.DataLoader(train_dataset,
                                        places=paddle.CPUPlace(),
                                        batch_size=2,
                                        shuffle=True)
    model = resnet18(pretrained=True, num_classes=102, with_pool=True)
    model = paddle.Model(model)
    optim = paddle.optimizer.Adam(learning_rate=0.001,
                                  parameters=model.parameters())
    """Train or evaluates the model."""
    if FLAGS.mode == 'train':
        model.prepare(
            optimizer=optim,
            # NOTE(review): MSELoss combined with an Accuracy metric looks
            # inconsistent for a 102-class classifier — confirm whether
            # CrossEntropyLoss was intended.
            loss=paddle.nn.MSELoss(),
            metric=Accuracy()  # topk: how many top predictions count as correct; default 1
        )
        model.fit(
            train_loader,
            epochs=2,
            verbose=1,
        )
        # Evaluates on the training set (no separate val set here).
        model.evaluate(train_dataset, batch_size=2, verbose=1)
        # Save an inference-only (non-training) model.
        model.save('inference_model', training=False)

    elif FLAGS.mode == 'eval_rollout':
        metadata = _read_metadata(FLAGS.data_path)
コード例 #4
0
ファイル: psenet_ic17.py プロジェクト: PaddlePaddle/Contrib
    def prepare_train_data(self, index):
        """Build one PSENet training sample.

        Returns:
            tuple of numpy arrays: (img CHW float, gt_text, gt_kernels,
            training_mask).
        """
        img_path = self.img_paths[index]
        gt_path = self.gt_paths[index]

        img = get_img(img_path, self.read_type)
        bboxes, words = get_ann(img, gt_path)

        # max line in gt: cap the number of word boxes per image
        if bboxes.shape[0] > self.max_word_num:
            bboxes = bboxes[:self.max_word_num]
            words = words[:self.max_word_num]

        if self.is_transform:
            img = random_scale(img, self.short_size)

        # gt_instance: per-pixel instance id (0 = background);
        # training_mask: 0 over illegible ('###') words, 1 elsewhere.
        gt_instance = np.zeros(img.shape[0:2], dtype='uint8')
        training_mask = np.ones(img.shape[0:2], dtype='uint8')
        if bboxes.shape[0] > 0:  # line
            # Boxes are normalized; scale by (w, h) to pixel coordinates.
            bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),
                                (bboxes.shape[0], -1, 2)).astype('int32')
            for i in range(bboxes.shape[0]):
                cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)
                if words[i] == '###':
                    cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)

        # Progressively shrunk kernel maps, one per kernel scale.
        gt_kernels = []
        for i in range(1, self.kernel_num):
            rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i
            gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')
            kernel_bboxes = shrink(bboxes, rate)
            # NOTE(review): this inner loop reuses `i`, shadowing the outer
            # kernel index — harmless in Python, but worth renaming.
            for i in range(bboxes.shape[0]):
                cv2.drawContours(gt_kernel, [kernel_bboxes[i].astype(int)], -1, 1, -1)
            gt_kernels.append(gt_kernel)

        if self.is_transform:
            # Apply the same geometric augmentation to image and all maps.
            imgs = [img, gt_instance, training_mask]
            imgs.extend(gt_kernels)

            if not self.with_rec:
                imgs = random_horizontal_flip(imgs)
            imgs = random_rotate(imgs)
            imgs = random_crop_padding(imgs, self.img_size)
            img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]
        # Binary text map derived from the instance map.
        gt_text = gt_instance.copy()
        gt_text[gt_text > 0] = 1
        gt_kernels = np.array(gt_kernels)

        img = Image.fromarray(img)
        img = img.convert('RGB')
        if self.is_transform:
            # Photometric jitter applied to the image only.
            img = transforms.ColorJitter(brightness=32.0 / 255, saturation=0.5)(img)

        img = transforms.ToTensor()(img)
        img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
        gt_text = paddle.to_tensor(gt_text,dtype="int64")
        gt_kernels = paddle.to_tensor(gt_kernels,dtype="int64")
        training_mask = paddle.to_tensor(training_mask,dtype="int64")

        return img.numpy(),gt_text.numpy(),gt_kernels.numpy(),training_mask.numpy()
コード例 #5
0
ファイル: t2t.py プロジェクト: AgentMaker/Paddle-Image-Models
def get_transforms(resize, crop):
    """Return the eval pipeline: bicubic resize, center crop, normalize."""
    steps = [
        T.Resize(resize, interpolation="bicubic"),
        T.CenterCrop(crop),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return T.Compose(steps)
コード例 #6
0
def get_transforms(resize, crop):
    """Eval pipeline with an optional center-crop step.

    The crop is skipped entirely when `crop` is falsy.
    """
    pipeline = [T.Resize(resize, interpolation="bicubic")]
    if crop:
        pipeline.append(T.CenterCrop(crop))
    pipeline += [
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
    return T.Compose(pipeline)
コード例 #7
0
def get_transforms(interpolation):
    """Return the 256-resize / 224-center-crop eval pipeline.

    Args:
        interpolation: resize interpolation mode passed through to T.Resize.
    """
    return T.Compose([
        T.Resize(256, interpolation=interpolation),
        T.CenterCrop(224),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
コード例 #8
0
ファイル: psgan_predictor.py プロジェクト: wannain/PaddleGAN
 def __init__(self, config, need_parser=True):
     """Set up image preprocessing and, optionally, a face parser.

     Args:
         config: predictor configuration (kept for interface compatibility).
         need_parser (bool): whether to construct the FaceParser helper.
     """
     self.img_size = 256
     # Resize + tensor conversion; normalization is kept as a separate step.
     self.transform = T.Compose([T.Resize(size=256), T.ToTensor()])
     self.norm = T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
     # Fixed ratios expressed relative to 0.85 — used elsewhere in the
     # predictor.
     self.up_ratio = 0.6 / 0.85
     self.down_ratio = 0.2 / 0.85
     self.width_ratio = 0.2 / 0.85
     if need_parser:
         self.face_parser = futils.mask.FaceParser()
コード例 #9
0
 def __init__(self, model_path, use_cuda=True):
     """Load the re-ID feature extractor and its preprocessing pipeline.

     Args:
         model_path (str): path to the saved state dict.
         use_cuda (bool): only selects the device string; the device is not
             otherwise applied in this initializer.
     """
     self.net = Net(reid=True)
     self.device = "cuda" if use_cuda else "cpu"
     # NOTE(review): torch.load is mixed with Paddle's set_state_dict —
     # confirm the checkpoint format actually matches the framework used.
     state_dict = torch.load(model_path)
     self.net.set_state_dict(state_dict)
     logger = logging.getLogger("root.tracker")
     logger.info("Loading weights from {}... Done!".format(model_path))
     # self.net.to(self.device)
     self.size = (64, 128)  # target crop size — presumably (w, h); confirm
     # Normalization operating directly on HWC arrays.
     self.norm = transforms.Compose([
         transforms.ToTensor(data_format='HWC'),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], data_format='HWC'),
     ])
コード例 #10
0
 def get_dataloader(self, num_workers):
     """Build a shuffled MNIST test-set DataLoader (batch size 32).

     Args:
         num_workers (int): worker processes for the loader.

     Returns:
         paddle.io.DataLoader over the MNIST test split.
     """
     dataset = paddle.vision.datasets.MNIST(
         mode='test',
         transform=transforms.Compose([
             transforms.CenterCrop(20),
             transforms.RandomResizedCrop(14),
             # BUGFIX: ToTensor must precede Normalize — the original
             # order applied Normalize to the raw PIL/HWC image instead
             # of the float CHW tensor the conventional pipeline uses.
             transforms.ToTensor(),
             transforms.Normalize()
         ]))
     loader = paddle.io.DataLoader(dataset,
                                   batch_size=32,
                                   num_workers=num_workers,
                                   shuffle=True)
     return loader
コード例 #11
0
    def __init__(self, root_dir, label_file, is_bin=True):
        """Read the label file and set up the training transform.

        Args:
            root_dir (str): dataset root directory.
            label_file (str): path of the tab-delimited label list.
            is_bin (bool): whether samples are stored in binary form.
        """
        super(CommonDataset, self).__init__()
        self.root_dir = root_dir
        self.label_file = label_file
        self.is_bin = is_bin
        self.delimiter = "\t"
        # Horizontal flip + tensor conversion + mean/std 0.5 normalization.
        self.transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])
        self.full_lines = self.get_file_list(label_file)
        self.num_samples = len(self.full_lines)
コード例 #12
0
    def prepare_test_data(self, index):
        """Load and normalize one test image.

        Returns:
            (img CHW float array, resized (h, w), original (h, w)).
        """
        img = get_img(self.img_paths[index], self.read_type)
        org_size = np.array(img.shape[:2])

        img = scale_aligned_short(img, self.short_size)
        new_size = np.array(img.shape[:2])

        pil_img = Image.fromarray(img).convert('RGB')
        tensor = transforms.ToTensor()(pil_img)
        tensor = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])(tensor)

        return tensor.numpy(), new_size, org_size
コード例 #13
0
ファイル: Dataset.py プロジェクト: adjaisd/DCGAN-paddle2.0
    def __init__(self, opt=opt):
        """Index the image directory and build the preprocessing pipeline.

        Args:
            opt: config namespace; uses imgs_path, test, batch_size,
                img_size.
        """
        super(DataGenerater, self).__init__()
        self.dir = opt.imgs_path

        # List the directory once (the original duplicated the listdir
        # expression in both conditional branches); in test mode keep only
        # the first 100 files.
        filenames = os.listdir(self.dir)
        self.datalist = filenames if opt.test == False else filenames[:100]
        self.batch_size = opt.batch_size

        # Probe the first image for its (width, height); the context
        # manager guarantees the file handle is closed even on error.
        # NOTE(review): plain string concatenation assumes `imgs_path` ends
        # with a path separator — confirm, or switch to os.path.join.
        with Image.open(self.dir + self.datalist[0]) as img:
            self.image_size = img.size

        self.transform = T.Compose([
            T.Resize(opt.img_size),
            T.CenterCrop(opt.img_size),
            T.ToTensor(),
        ])
        self.num_path_dict = {}
コード例 #14
0
def load_train_test_datasets(dataset_root):
    """Build train (random-crop) and test (center-crop) dataset folders.

    Args:
        dataset_root (str): directory containing 'train' and 'test' subdirs.

    Returns:
        (train_set, test_set) DatasetFolder pair sharing ImageNet stats.
    """
    mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]

    train_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    test_transforms = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    train_set = DatasetFolder(osp.join(dataset_root, 'train'),
                              transform=train_transforms)
    test_set = DatasetFolder(osp.join(dataset_root, 'test'),
                             transform=test_transforms)
    return train_set, test_set
コード例 #15
0
 def __init__(self, model_path, use_cuda=True, use_static=False):
     """Load the feature-extractor model (scripted or static-graph form).

     Args:
         model_path (str): path to the scripted model or inference model.
         use_cuda (bool): currently unused in this initializer.
         use_static (bool): choose paddle.static inference over jit load.
     """
     self.use_static = use_static
     if not use_static:
         # NOTE(review): torch.jit.load alongside Paddle APIs — confirm
         # which framework the checkpoint actually belongs to.
         self.net = torch.jit.load(model_path)
     else:
         # NOTE(review): CUDAPlace(0) is used unconditionally here even
         # though `use_cuda` exists — confirm this is intended.
         place = paddle.CUDAPlace(0)
         self.exe = paddle.static.Executor(place)
         self.static_model = paddle.static.load_inference_model(
             model_path, self.exe)
     logger = logging.getLogger("root.tracker")
     logger.info("Loading weights from {}... Done!".format(model_path))
     # self.net.to(self.device)
     self.size = (64, 128)  # target crop size — presumably (w, h); confirm
     # Normalization operating directly on HWC arrays.
     self.norm = transforms.Compose([
         transforms.ToTensor(data_format='HWC'),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                              data_format='HWC'),
     ])
コード例 #16
0
 def __init__(self, root_dir):
     """Open an indexed RecordIO face dataset (train.rec / train.idx).

     Args:
         root_dir (str): directory containing train.rec and train.idx.
     """
     super(MXFaceDataset, self).__init__()
     self.transform = transforms.Compose(
         [
          transforms.RandomHorizontalFlip(),
          transforms.ToTensor(),
          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
          ])
     self.root_dir = root_dir
     path_imgrec = os.path.join(root_dir, 'train.rec')
     path_imgidx = os.path.join(root_dir, 'train.idx')
     self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
     # Record 0 is presumably a header record: flag > 0 means its label
     # carries the sample index range — confirm against the packing tool.
     s = self.imgrec.read_idx(0)
     header, _ = recordio.unpack(s)
     if header.flag > 0:
         self.header0 = (int(header.label[0]), int(header.label[1]))
         # Samples occupy indices 1 .. label[0]-1.
         self.imgidx = np.array(range(1, int(header.label[0])))
     else:
         # No header record: use every key in the record file.
         self.imgidx = np.array(list(self.imgrec.keys))
コード例 #17
0
 def __init__(self, mode='train'):
     """Collect positive ("p") and negative ("n") samples, split 80/20.

     Args:
         mode (str): "train" yields the first 80% of each class; any other
             value yields the remaining 20%.
     """
     super(HumanClasDataset, self).__init__()
     self.data_path = "/home/aistudio/plane/bend/"
     pos = sorted(os.listdir(osp.join(self.data_path, "p")))
     neg = sorted(os.listdir(osp.join(self.data_path, "n")))
     pos = [osp.join("p", name) for name in pos]
     neg = [osp.join("n", name) for name in neg]
     pos_cut = int(len(pos) * 0.8)
     neg_cut = int(len(neg) * 0.8)
     if mode == "train":
         samples = [[p, 1] for p in pos[:pos_cut]]
         samples += [[n, 0] for n in neg[:neg_cut]]
     else:
         samples = [[p, 1] for p in pos[pos_cut:]]
         samples += [[n, 0] for n in neg[neg_cut:]]
     self.data = samples
     self.transform = vt.Compose([vt.ToTensor()])
コード例 #18
0
def main():
    """Run one random MNIST test sample through a Paddle inference predictor."""
    args = parse_args()

    # Configuration: CPU-only inference with explicit input names.
    config = Config(args.model_file, args.params_file)
    config.disable_gpu()
    config.switch_use_feed_fetch_ops(False)
    config.switch_specify_input_names(True)

    # Create the Paddle predictor.
    predictor = create_predictor(config)

    # Fetch one random test image as input.
    val_dataset = paddle.vision.datasets.MNIST(mode='test',
                                               transform=transforms.ToTensor())
    (image, label) = val_dataset[np.random.randint(10000)]
    # fake_input = np.random.randn(1, 1, 28, 28).astype("float32")
    # image = np.asndarray(image).astype("float32")
    # print(image.shape)
    image = image.numpy().reshape([1, 1, 28, 28])  # NCHW batch of one
    # print(image.shape)
    # print(fake_input.shape)
    input_names = predictor.get_input_names()
    input_handle = predictor.get_input_handle(input_names[0])
    input_handle.reshape([1, 1, 28, 28])
    input_handle.copy_from_cpu(image)

    # Run the predictor.
    predictor.run()

    # Collect the output and report the argmax class.
    output_names = predictor.get_output_names()
    output_handle = predictor.get_output_handle(output_names[0])
    output = output_handle.copy_to_cpu()

    print("True label: ", label.item())
    print("Prediction: ", np.argmax(output))
コード例 #19
0
ファイル: from_paddle.py プロジェクト: chenghanpeng/tvm
        tar.extract(name, "./")

model = paddle.jit.load("./paddle_resnet50/model")

######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!

from PIL import Image
import paddle.vision.transforms as T

# Standard ImageNet eval preprocessing: resize, center-crop, normalize.
transforms = T.Compose([
    T.Resize((256, 256)),
    T.CenterCrop(224),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
img_path = download_testdata(img_url, "cat.png", module="data")
img = Image.open(img_path).resize((224, 224))

img = transforms(img)
img = np.expand_dims(img, axis=0)  # add a batch dimension

######################################################################
# Compile the model with relay
# ---------------------------------------------

target = "llvm"
コード例 #20
0
ファイル: train.py プロジェクト: LiJinrun/softwarecup2020
def main(opt):
    """Train a FairMOT-style tracking model according to `opt`.

    Loads the dataset config, builds the model and trainer, then runs the
    epoch loop with periodic checkpointing and stepped LR drops.
    """
    # torch.manual_seed(opt.seed)
    # torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    paddle.seed(opt.seed)
    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task)
    # Dataset config: JSON file listing the training set and data root.
    f = open(opt.data_cfg)
    data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    f.close()
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt,
                      dataset_root,
                      trainset_paths, (1088, 608),
                      augment=True,
                      transforms=transforms)
    # Dataset-dependent head configuration is written back into opt.
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    # opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
    opt.device = paddle.get_device()

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)

    start_epoch = 0

    # Get dataloader

    # train_loader = torch.utils.data.DataLoader(
    #     dataset,
    #     batch_size=opt.batch_size,
    #     shuffle=True,
    #     num_workers=opt.num_workers,
    #     pin_memory=True,
    #     drop_last=True
    # )
    train_loader = DataLoader(dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              use_shared_memory=False,
                              drop_last=True)
    print('Starting training...')
    Trainer = train_factory[opt.task]
    # optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    # optimizer = paddle.optimizer.Adam(learning_rate=opt.lr, parameters=model.parameters()) # this line existed purely to pass a parameter along

    # trainer = Trainer(opt, model, optimizer)
    trainer = Trainer(opt, model)
    optimizer = trainer.optimizer  # see base_trainer.py
    id_classifier = trainer.loss.classifier  # see base_trainer.py
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    # Resume / warm-start: pretrained HRNet weights load only the model;
    # any other checkpoint also restores optimizer state and the epoch.
    if 'fairmot_hrnet_w18' in opt.load_model:
        model = load_model(model, opt.load_model)
    elif opt.load_model != '':
        model, optimizer, start_epoch, id_classifier = load_model(
            model, opt.load_model, trainer.optimizer, trainer.loss.classifier,
            opt.resume, opt.lr, opt.lr_step)

    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        # Checkpoint: epoch-tagged at validation intervals, else "last".
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pdparams'.format(mark)),
                epoch, model, optimizer, id_classifier)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pdparams'),
                       epoch, model, optimizer, id_classifier)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pdparams'.format(epoch)),
                epoch, model, optimizer, id_classifier)
            # Step decay: multiply base LR by 0.1 per completed step epoch.
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            # for param_group in optimizer.param_groups:
            #     param_group['lr'] = lr
            optimizer.set_lr(lr)
        if epoch % 5 == 0 or epoch >= 25:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pdparams'.format(epoch)),
                epoch, model, optimizer, id_classifier)
    logger.close()
コード例 #21
0
ファイル: psenet_synth.py プロジェクト: PaddlePaddle/Contrib
    def __getitem__(self, index):
        """Build one SynthText training sample.

        Returns:
            (img, gt_text, gt_kernels, training_mask) as numpy arrays.
        """
        img_path = synth_train_data_dir + self.img_paths[index][0]
        img = get_img(img_path, read_type=self.read_type)
        bboxes, words = get_ann(img, self.gts, self.texts, index)

        # Cap the number of word boxes per image.
        if bboxes.shape[0] > self.max_word_num:
            bboxes = bboxes[:self.max_word_num]
            words = words[:self.max_word_num]

        if self.is_transform:
            img = random_scale(img, self.short_size)

        # gt_instance: per-pixel instance id (0 = background);
        # training_mask: 0 over illegible ('###') words, 1 elsewhere.
        gt_instance = np.zeros(img.shape[0:2], dtype='uint8')
        training_mask = np.ones(img.shape[0:2], dtype='uint8')
        if bboxes.shape[0] > 0:
            # Boxes are normalized; scale by (w, h) to pixel coordinates.
            bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),
                                (bboxes.shape[0], -1, 2)).astype('int32')
            for i in range(bboxes.shape[0]):
                cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)
                if words[i] == '###':
                    cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)

        # Progressively shrunk kernel maps, one per kernel scale.
        gt_kernels = []
        for i in range(1, self.kernel_num):
            rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i
            gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')
            kernel_bboxes = shrink(bboxes, rate)
            # NOTE(review): inner loop reuses `i`, shadowing the kernel
            # index — harmless in Python, but worth renaming.
            for i in range(len(bboxes)):
                cv2.drawContours(gt_kernel, [kernel_bboxes[i].astype(int)], -1,
                                 1, -1)
            gt_kernels.append(gt_kernel)

        if self.is_transform:
            # Apply identical geometric augmentation to image and all maps.
            imgs = [img, gt_instance, training_mask]
            imgs.extend(gt_kernels)

            imgs = random_horizontal_flip(imgs)
            imgs = random_rotate(imgs)
            imgs = random_crop_padding(imgs, self.img_size)
            img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[
                1], imgs[2], imgs[3:]

        # Binary text map derived from the instance map.
        gt_text = gt_instance.copy()
        gt_text[gt_text > 0] = 1
        gt_kernels = np.array(gt_kernels)

        # Tight per-instance bounding boxes in (row, col) coordinates.
        max_instance = np.max(gt_instance)
        gt_bboxes = np.zeros((self.max_word_num, 4), dtype=np.int32)
        for i in range(1, max_instance + 1):
            ind = gt_instance == i
            if np.sum(ind) == 0:
                continue
            points = np.array(np.where(ind)).transpose((1, 0))
            tl = np.min(points, axis=0)
            br = np.max(points, axis=0) + 1
            # NOTE(review): if max_instance == self.max_word_num this index
            # overflows gt_bboxes (valid indices stop at max_word_num - 1);
            # some upstream versions allocate max_word_num + 1 rows — confirm.
            gt_bboxes[i] = (tl[0], tl[1], br[0], br[1])

        img = Image.fromarray(img)
        img = img.convert('RGB')

        if self.is_transform:
            # Photometric jitter applied to the image only.
            img = transforms.ColorJitter(brightness=32.0 / 255,
                                         saturation=0.5)(img)

        img = transforms.ToTensor()(img)
        img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])(img)

        gt_text = paddle.to_tensor(gt_text, dtype="int64")
        gt_kernels = paddle.to_tensor(gt_kernels, dtype="int64")
        training_mask = paddle.to_tensor(training_mask, dtype="int64")

        # NOTE(review): `data` below is built but never used — dead code?
        data = dict(
            imgs=img,
            gt_texts=gt_text,
            gt_kernels=gt_kernels,
            training_masks=training_mask,
        )

        return img.numpy(), gt_text.numpy(), gt_kernels.numpy(
        ), training_mask.numpy()
コード例 #22
0
def predict(path, model, cfg, out_path):
    """Run text detection on every image in `path`; save visualizations.

    Args:
        path (str): directory of input images.
        model: detection model, called as model(**data_dict).
        cfg: config object (data/test settings, report_speed, ...).
        out_path (str): directory where annotated images are written.
    """

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    model.eval()

    rf = ResultFormat(cfg.data.test.type, cfg.test_cfg.result_path)

    if cfg.report_speed:
        # Rolling per-stage timing meters (window of 500 samples).
        speed_meters = dict(backbone_time=AverageMeter(500),
                            neck_time=AverageMeter(500),
                            det_head_time=AverageMeter(500),
                            det_pse_time=AverageMeter(500),
                            rec_time=AverageMeter(500),
                            total_time=AverageMeter(500))

    def get_img(img_path, read_type='pil'):
        # Read an image as RGB (cv2 loads BGR, hence the channel flip).
        try:
            if read_type == 'cv2':
                img = cv2.imread(img_path)
                img = img[:, :, [2, 1, 0]]
            elif read_type == 'pil':
                img = np.array(Image.open(img_path))
        except Exception as e:
            print('Cannot read image: %s.' % img_path)
            raise
        return img

    def scale_aligned_short(img, short_size=736):
        # Scale so the short side equals short_size, rounding both
        # dimensions up to multiples of 32.
        h, w = img.shape[0:2]
        scale = short_size * 1.0 / min(h, w)
        h = int(h * scale + 0.5)
        w = int(w * scale + 0.5)
        if h % 32 != 0:
            h = h + (32 - h % 32)
        if w % 32 != 0:
            w = w + (32 - w % 32)
        img = cv2.resize(img, dsize=(w, h))
        return img

    # Preprocess all images up front: normalize, add a batch dimension,
    # and record original/resized sizes for postprocessing.
    imgs_list = os.listdir(path)
    data_lists = []
    for img_path in imgs_list:
        img = get_img(osp.join(path, img_path))
        img_meta = dict(org_img_size=np.array(img.shape[:2]))

        img = scale_aligned_short(img, int(cfg.data.test.short_size))
        img_meta.update(dict(img_size=np.array(img.shape[:2])))
        img = Image.fromarray(img)
        img = img.convert('RGB')
        img = transforms.ToTensor()(img)
        img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])(img)
        img = img.numpy()
        img = np.expand_dims(img, axis=0)
        img = paddle.to_tensor(img)
        img_meta["img_size"] = paddle.to_tensor(
            np.expand_dims(img_meta["img_size"], axis=0))
        img_meta["org_img_size"] = paddle.to_tensor(
            np.expand_dims(img_meta["org_img_size"], axis=0))
        data_lists.append((img, img_meta["img_size"], img_meta["org_img_size"],
                           osp.join(path, img_path)))

    for idx, data in enumerate(data_lists):
        img_meta = {}
        img_meta["img_size"] = data[1]
        img_meta["org_img_size"] = data[2]
        img_path = data[3]
        data_dict = dict(imgs=data[0], img_metas=img_meta)
        print('Testing %d/%d' % (idx, len(data_lists)))
        sys.stdout.flush()

        # prepare input
        data_dict['imgs'] = data_dict['imgs']
        data_dict.update(dict(cfg=cfg))

        # forward pass without gradient tracking
        with paddle.no_grad():
            outputs = model(**data_dict)

        if cfg.report_speed:
            report_speed(outputs, speed_meters)

        data_type = cfg.data.train.type
        if data_type == "PSENET_IC15" or data_type == "PSENET_IC17":
            # Draw a rectangle from two opposite points of the output box.
            save_img = cv2.imread(img_path)
            for bbox in outputs["bboxes"]:
                cv2.rectangle(save_img, (bbox[2], bbox[3]), (bbox[6], bbox[7]),
                              (0, 0, 255), 2)
            save_jpg = img_path.split("/")[-1]
            save_img_path = osp.join(out_path, save_jpg)
            cv2.imwrite(save_img_path, save_img)

        elif data_type == "PSENET_TT":
            # Polygon output: bbox is a flat [x0, y0, x1, y1, ...] list;
            # draw the closed polygon segment by segment.
            save_img = cv2.imread(img_path)
            for bbox in outputs["bboxes"]:
                xs = []
                ys = []
                i = 0
                for point in bbox:
                    if i % 2 == 0:
                        xs.append(point)
                    else:
                        ys.append(point)
                    i = i + 1
                # NOTE(review): this loop reuses `idx`, shadowing the outer
                # image index — harmless here but worth renaming.
                for idx, x in enumerate(xs):
                    if idx < len(xs) - 1:
                        cv2.line(save_img, (x, ys[idx]),
                                 (xs[idx + 1], ys[idx + 1]), (0, 0, 255), 2)
                    else:
                        cv2.line(save_img, (x, ys[idx]), (xs[0], ys[0]),
                                 (0, 0, 255), 2)
            save_jpg = img_path.split("/")[-1]
            save_img_path = osp.join(out_path, save_jpg)
            cv2.imwrite(save_img_path, save_img)
コード例 #23
0
    # Log the full configuration, one key per line.
    logger.info("=" * 60)
    logger.info("Overall Configurations:")
    for cfg_, value in cfg.items():
        logger.info(cfg_ + ":" + str(value))
    logger.info("=" * 60)

    # Collective (multi-device) training via fleet.
    fleet.init(is_collective=True)

    train_transform = transforms.Compose([
        # refer to https://pytorch.org/docs/stable/torchvision/transforms.html for more build-in online data augmentation
        transforms.Resize(
            [int(128 * INPUT_SIZE[0] / 112),
             int(128 * INPUT_SIZE[0] / 112)]),  # smaller side resized
        transforms.RandomCrop([INPUT_SIZE[0], INPUT_SIZE[1]]),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=RGB_MEAN, std=RGB_STD),
    ])
    logger.info('loading data form file:{}'.format(DATA_ROOT))
    train_dataset = LFWDataset(os.path.join(DATA_ROOT), train_transform)
    num_classes = train_dataset.num_classes
    train_loader = paddle.io.DataLoader(train_dataset,
                                        places=[paddle.CPUPlace()],
                                        batch_size=BATCH_SIZE,
                                        shuffle=True,
                                        drop_last=DROP_LAST,
                                        num_workers=NUM_WORKERS)

    # NOTE(review): `num_classes` above is unused; NUM_CLASS duplicates it.
    NUM_CLASS = train_dataset.num_classes
    logger.info("Number of Training Classes: {}".format(NUM_CLASS))
    # lfw, cfp_ff, cfp_fp, agedb, calfw, cplfw, vgg2_fp, lfw_issame, cfp_ff_issame, cfp_fp_issame, agedb_issame, calfw_issame, cplfw_issame, vgg2_fp_issame = get_val_data(
コード例 #24
0
import paddle
import paddle.vision.transforms as transforms

# Minimal repro script: iterating a MNIST DataLoader.
# will segment fault
num_workers = 0

# OK
# num_workers = 1
trainset = paddle.vision.datasets.MNIST(mode='test',
                                        transform=transforms.ToTensor())

trainloader = paddle.io.DataLoader(trainset,
                                   batch_size=32,
                                   num_workers=num_workers,
                                   shuffle=True)

# Iterate a few epochs to trigger the reported crash.
for epoch_id in range(3):
    print("start iter")
    for batch_idx, data in enumerate(trainloader):
        print("epoch {}, batch {}".format(epoch_id, batch_idx))
コード例 #25
0
        return len(self.segment)

    def __getitem__(self, idx):
        """Open the idx-th segment entry; return (image_tensor, label_index)."""
        entry = self.segment[idx]
        with entry.open() as fp:
            tensor = self.transform(Image.open(fp))
        label = self.category_to_index[entry.label.classification.category]
        return tensor, label


"""Build a dataloader and run it"""
ACCESS_KEY = "<YOUR_ACCESSKEY>"

# Single-channel mean/std normalization after tensor conversion.
to_tensor = transforms.ToTensor()
normalization = transforms.Normalize(mean=[0.485], std=[0.229])
my_transforms = transforms.Compose([to_tensor, normalization])

# Stream the "train" segment (presumably from a TensorBay/GAS service)
# with the transform applied per sample.
train_segment = DogsVsCatsSegment(GAS(ACCESS_KEY),
                                  segment_name="train",
                                  transform=my_transforms)
train_dataloader = DataLoader(train_segment,
                              batch_size=4,
                              shuffle=True,
                              num_workers=0)

# Print the label batch for each mini-batch.
for index, (image, label) in enumerate(train_dataloader):
    print(f"{index}: {label}")
""""""