Example #1
def __init__(self, mode='train'):
        """
        初始化函数
        """
        assert mode in ['train', 'test',
                        'valid'], "mode must be one of 'train', 'test', 'valid'."

        self.data = []

        with open('signs/{}.txt'.format(mode)) as f:
            for line in f.readlines():
                info = line.strip().split('\t')

                if len(info) > 1:
                    self.data.append([info[0].strip(), info[1].strip()])

        if mode == 'train':
            self.transforms = T.Compose([
                T.RandomResizedCrop(IMAGE_SIZE),  # random crop and resize
                T.RandomHorizontalFlip(0.5),  # random horizontal flip with p=0.5
                T.ToTensor(),  # convert and rescale: HWC => CHW
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])  # normalize with ImageNet statistics
            ])
        else:
            self.transforms = T.Compose([
                T.Resize(256),  # resize the shorter side to 256
                T.RandomCrop(IMAGE_SIZE),  # random crop
                T.ToTensor(),  # convert and rescale: HWC => CHW
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])  # normalize with ImageNet statistics
            ])
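A minimal usage sketch for the dataset above (the class name SignsDataset is an assumption, as are its __getitem__/__len__ methods, which the snippet does not show):

# Hypothetical usage; SignsDataset names the class above, and IMAGE_SIZE
# must already be defined in the enclosing module.
import paddle

train_ds = SignsDataset(mode='train')
train_loader = paddle.io.DataLoader(train_ds, batch_size=32, shuffle=True)
for images, labels in train_loader:
    pass  # feed batches to a model here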
Example #2
    def __init__(self, device="cpu"):
        self.mapper = {
            0: 0,
            1: 1,
            2: 2,
            3: 3,
            4: 4,
            5: 5,
            6: 0,
            7: 11,
            8: 12,
            9: 0,
            10: 6,
            11: 8,
            12: 7,
            13: 9,
            14: 13,
            15: 0,
            16: 0,
            17: 10,
            18: 0
        }
        #self.dict = paddle.to_tensor(mapper)
        self.save_pth = get_path_from_url(BISENET_WEIGHT_URL,
                                          osp.split(osp.realpath(__file__))[0])

        self.net = BiSeNet(n_classes=19)

        self.transforms = T.Compose([
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
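A sketch of how the 19-class mapper above might be applied to a BiSeNet prediction (illustrative only; pred is an assumed HxW array of class ids):

import numpy as np

# `mapper` is the dict defined above; `pred` is an assumed HxW int array.
lut = np.zeros(19, dtype=np.int64)
for src, dst in mapper.items():
    lut[src] = dst
remapped = lut[pred]  # vectorized lookup, same shape as pred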
Example #3
    def test_visualdl_callback(self):
        # VisualDL does not support Python 2
        if sys.version_info < (3, ):
            return

        inputs = [InputSpec([-1, 1, 28, 28], 'float32', 'image')]
        labels = [InputSpec([None, 1], 'int64', 'label')]

        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = MnistDataset(mode='train', transform=transform)
        eval_dataset = MnistDataset(mode='test', transform=transform)

        net = paddle.vision.models.LeNet()
        model = paddle.Model(net, inputs, labels)

        optim = paddle.optimizer.Adam(0.001, parameters=net.parameters())
        model.prepare(optimizer=optim,
                      loss=paddle.nn.CrossEntropyLoss(),
                      metrics=paddle.metric.Accuracy())

        callback = paddle.callbacks.VisualDL(log_dir='visualdl_log_dir')
        model.fit(train_dataset,
                  eval_dataset,
                  batch_size=64,
                  callbacks=callback)
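Once training finishes, the logged scalars can be inspected by starting the VisualDL server, e.g. visualdl --logdir visualdl_log_dir.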
Example #4
    def __init__(self, cfg):
        """Initialize this dataset class.

        Parameters:
            cfg (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, cfg)
        self.image_path = cfg.dataroot
        self.mode = cfg.phase
        self.transform = get_makeup_transform(cfg)

        self.norm = T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5])
        self.transform_mask = get_makeup_transform(cfg, pic="mask")
        self.trans_size = cfg.trans_size
        self.cls_list = cfg.cls_list
        self.cls_A = self.cls_list[0]
        self.cls_B = self.cls_list[1]
        for cls in self.cls_list:
            setattr(
                self, cls + "_list_path",
                os.path.join(self.image_path, self.mode + '_' + cls + ".txt"))
            setattr(self, cls + "_lines",
                    open(getattr(self, cls + "_list_path"), 'r').readlines())
            setattr(self, "num_of_" + cls + "_data",
                    len(getattr(self, cls + "_lines")))
        print('Start preprocessing dataset...')
        self.preprocess()
        print('Finished preprocessing dataset.')
Example #5
def prepare_input():
    transforms = [
        T.Resize(size=(target_height, target_width)),
        T.Normalize(mean=(0, 0, 0),
                    std=(1, 1, 1),
                    data_format='HWC',
                    to_rgb=True),
        T.Transpose()
    ]

    img_file = root_path / "street.jpeg"
    img = cv2.imread(str(img_file))
    normalized_img = T.Compose(transforms)(img)
    normalized_img = normalized_img.astype(np.float32, copy=False) / 255.0

    # add a new axis in front
    img_input = normalized_img[np.newaxis, :]
    # scale_factor is calculated as: im_shape / original_im_shape
    h_scale = target_height / img.shape[0]
    w_scale = target_width / img.shape[1]
    inputs = {
        "image": img_input,
        "im_shape": [target_height, target_width],
        "scale_factor": [h_scale, w_scale]
    }
    return inputs, img
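A hedged sketch of consuming this dict with a paddle.inference predictor; the predictor object is an assumption, and detection models typically expect im_shape and scale_factor as float32 arrays with a batch dimension:

import numpy as np

# Hypothetical: `predictor` is a paddle.inference.Predictor created elsewhere.
feed, raw_img = prepare_input()
feed["im_shape"] = np.array([feed["im_shape"]], dtype=np.float32)
feed["scale_factor"] = np.array([feed["scale_factor"]], dtype=np.float32)
for name in predictor.get_input_names():
    handle = predictor.get_input_handle(name)
    handle.copy_from_cpu(feed[name])
predictor.run()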
Example #6
def main(_):
    transform = T.Compose([T.ToTensor(), T.Normalize(mean=0.5, std=0.5)])
    train_img_path = []
    train_label = []
    train_dataset = MyDataset(image=train_img_path,
                              lable=train_label,
                              transform=transform)
    train_loader = paddle.io.DataLoader(train_dataset,
                                        places=paddle.CPUPlace(),
                                        batch_size=2,
                                        shuffle=True)
    model = resnet18(pretrained=True, num_classes=102, with_pool=True)
    model = paddle.Model(model)
    optim = paddle.optimizer.Adam(learning_rate=0.001,
                                  parameters=model.parameters())
    """Train or evaluates the model."""
    if FLAGS.mode == 'train':
        model.prepare(
            optimizer=optim,
            loss=paddle.nn.MSELoss(),
            metrics=Accuracy()  # topk: number of top predictions used for accuracy; defaults to 1
        )
        model.fit(
            train_loader,
            epochs=2,
            verbose=1,
        )
        model.evaluate(train_dataset, batch_size=2, verbose=1)
        model.save('inference_model', training=False)

    elif FLAGS.mode == 'eval_rollout':
        metadata = _read_metadata(FLAGS.data_path)
Example #7
    def prepare_train_data(self, index):
        img_path = self.img_paths[index]
        gt_path = self.gt_paths[index]

        img = get_img(img_path, self.read_type)
        bboxes, words = get_ann(img, gt_path)

        # limit the number of annotated text lines per image
        if bboxes.shape[0] > self.max_word_num:
            bboxes = bboxes[:self.max_word_num]
            words = words[:self.max_word_num]

        if self.is_transform:
            img = random_scale(img, self.short_size)

        gt_instance = np.zeros(img.shape[0:2], dtype='uint8')
        training_mask = np.ones(img.shape[0:2], dtype='uint8')
        if bboxes.shape[0] > 0:  # line
            bboxes = np.reshape(bboxes * ([img.shape[1], img.shape[0]] * 4),
                                (bboxes.shape[0], -1, 2)).astype('int32')
            for i in range(bboxes.shape[0]):
                cv2.drawContours(gt_instance, [bboxes[i]], -1, i + 1, -1)
                if words[i] == '###':
                    cv2.drawContours(training_mask, [bboxes[i]], -1, 0, -1)

        gt_kernels = []
        for i in range(1, self.kernel_num):
            rate = 1.0 - (1.0 - self.min_scale) / (self.kernel_num - 1) * i
            gt_kernel = np.zeros(img.shape[0:2], dtype='uint8')
            kernel_bboxes = shrink(bboxes, rate)
            for j in range(bboxes.shape[0]):
                cv2.drawContours(gt_kernel, [kernel_bboxes[j].astype(int)], -1, 1, -1)
            gt_kernels.append(gt_kernel)

        if self.is_transform:
            imgs = [img, gt_instance, training_mask]
            imgs.extend(gt_kernels)

            if not self.with_rec:
                imgs = random_horizontal_flip(imgs)
            imgs = random_rotate(imgs)
            imgs = random_crop_padding(imgs, self.img_size)
            img, gt_instance, training_mask, gt_kernels = imgs[0], imgs[1], imgs[2], imgs[3:]
        gt_text = gt_instance.copy()
        gt_text[gt_text > 0] = 1
        gt_kernels = np.array(gt_kernels)

        img = Image.fromarray(img)
        img = img.convert('RGB')
        if self.is_transform:
            img = transforms.ColorJitter(brightness=32.0 / 255, saturation=0.5)(img)

        img = transforms.ToTensor()(img)
        img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(img)
        gt_text = paddle.to_tensor(gt_text, dtype="int64")
        gt_kernels = paddle.to_tensor(gt_kernels, dtype="int64")
        training_mask = paddle.to_tensor(training_mask, dtype="int64")

        return img.numpy(), gt_text.numpy(), gt_kernels.numpy(), training_mask.numpy()
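Note on the kernel shrinking above: the rates decrease linearly from 1.0 toward min_scale. With kernel_num=7 and min_scale=0.4, for example, the kernels are shrunk by factors 0.9, 0.8, ..., 0.4, which matches the progressive-scale kernel scheme of PSENet-style text detectors.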
Example #8
 def run_model(self, model):
     transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
     train_dataset = MNIST(mode='train', transform=transform)
     model.fit(train_dataset,
               epochs=1,
               batch_size=64,
               num_iters=2,
               log_freq=1)
Example #9
def get_transforms(resize, crop):
    transforms = T.Compose([
        T.Resize(resize, interpolation="bicubic"),
        T.CenterCrop(crop),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return transforms
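An illustrative call of the helper above (pil_image is an assumed PIL.Image):

eval_tf = get_transforms(resize=256, crop=224)
img_tensor = eval_tf(pil_image)  # CHW float tensor, ImageNet-normalized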
Example #10
    def test_ptq(self):
        seed = 1
        np.random.seed(seed)
        paddle.static.default_main_program().random_seed = seed
        paddle.static.default_startup_program().random_seed = seed

        _logger.info("create the fp32 model")
        fp32_lenet = ImperativeLenet()

        _logger.info("prepare data")
        batch_size = 64
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = paddle.vision.datasets.MNIST(
            mode='train', backend='cv2', transform=transform)
        val_dataset = paddle.vision.datasets.MNIST(
            mode='test', backend='cv2', transform=transform)

        place = paddle.CUDAPlace(0) \
            if paddle.is_compiled_with_cuda() else paddle.CPUPlace()
        train_reader = paddle.io.DataLoader(
            train_dataset,
            drop_last=True,
            places=place,
            batch_size=batch_size,
            return_list=True)
        test_reader = paddle.io.DataLoader(
            val_dataset, places=place, batch_size=batch_size, return_list=True)

        _logger.info("train the fp32 model")
        self.model_train(fp32_lenet, train_reader)

        _logger.info("test fp32 model")
        fp32_top1, fp32_top5 = self.model_test(fp32_lenet, test_reader)

        _logger.info("quantize the fp32 model")
        quanter = PTQ()
        quant_lenet = quanter.quantize(fp32_lenet, fuse=True)

        _logger.info("calibrate")
        self.calibrate(quant_lenet, test_reader)

        _logger.info("save and test the quantized model")
        save_path = "./tmp/model"
        input_spec = paddle.static.InputSpec(
            shape=[None, 1, 28, 28], dtype='float32')
        quanter.save_quantized_model(
            quant_lenet, save_path, input_spec=[input_spec])
        quant_top1, quant_top5 = self.model_test(quant_lenet, test_reader)

        _logger.info("FP32 acc: top1: {}, top5: {}".format(fp32_top1,
                                                           fp32_top5))
        _logger.info("Int acc: top1: {}, top5: {}".format(quant_top1,
                                                          quant_top5))

        diff = 0.002
        self.assertTrue(
            fp32_top1 - quant_top1 < diff,
            msg="The acc of quant model is too lower than fp32 model")
Example #11
	def __init__(self, path, fl, sz=64):
		super(TeDataset, self).__init__()
		self.path = os.path.join(path,fl)
		self.sz = sz
		self.data = self.get_data()
		self.as_tensor = T.Compose([
			T.Normalize([90.39095958, 89.36796833, 85.25276458],
			            [3.09639721, 2.50642894, 2.7135403],
			            ),
		])
Example #12
 def __init__(self, noise_path, size, keys=None):
     self.noise_path = noise_path
     self.noise_imgs = sorted(glob.glob(noise_path + '*.png'))
     self.size = size
     self.keys = keys
     self.transform = T.Compose([
         T.RandomCrop(size),
         T.Transpose(),
         T.Normalize([0., 0., 0.], [255., 255., 255.])
     ])
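Note: Transpose converts the HWC uint8 crop to CHW, and Normalize with mean 0 and std 255 then simply rescales the pixel values into [0, 1].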
Example #13
def get_transforms(interpolation):
    transforms = T.Compose(
        [
            T.Resize(256, interpolation=interpolation),
            T.CenterCrop(224),
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    return transforms
Example #14
def get_transforms(resize, crop):
    transforms = [T.Resize(resize, interpolation="bicubic")]
    if crop:
        transforms.append(T.CenterCrop(crop))
    transforms.append(T.ToTensor())
    transforms.append(
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    )
    transforms = T.Compose(transforms)
    return transforms
Example #15
 def __init__(self, config, need_parser=True):
     self.img_size = 256
     self.transform = transform = T.Compose([
         T.Resize(size=256),
         T.Permute(to_rgb=False),
     ])
     self.norm = T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5])
     if need_parser:
         self.face_parser = futils.mask.FaceParser()
     self.up_ratio = 0.6 / 0.85
     self.down_ratio = 0.2 / 0.85
     self.width_ratio = 0.2 / 0.85
Example #16
 def __init__(self, config, need_parser=True):
     self.img_size = 256
     self.transform = transform = T.Compose([
         T.Resize(size=256),
         T.ToTensor(),
     ])
     self.norm = T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
     if need_parser:
         self.face_parser = futils.mask.FaceParser()
     self.up_ratio = 0.6 / 0.85
     self.down_ratio = 0.2 / 0.85
     self.width_ratio = 0.2 / 0.85
Example #17
 def __init__(self, model_path, use_cuda=True):
     self.net = Net(reid=True)
     self.device = "cuda" if use_cuda else "cpu"
     state_dict = paddle.load(model_path)
     self.net.set_state_dict(state_dict)
     logger = logging.getLogger("root.tracker")
     logger.info("Loading weights from {}... Done!".format(model_path))
     # self.net.to(self.device)
     self.size = (64, 128)
     self.norm = transforms.Compose([
         transforms.ToTensor(data_format='HWC'),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], data_format='HWC'),
     ])
Example #18
 def get_dataloader(self, num_workers):
     dataset = paddle.vision.datasets.MNIST(
         mode='test',
         transform=transforms.Compose([
             transforms.CenterCrop(20),
             transforms.RandomResizedCrop(14),
             transforms.ToTensor(),
             transforms.Normalize()
         ]))
     loader = paddle.io.DataLoader(dataset,
                                   batch_size=32,
                                   num_workers=num_workers,
                                   shuffle=True)
     return loader
Example #19
 def __init__(self, data_root, input_size, mean, std):
     super(NormalDataset, self).__init__()
     self.mean = mean
     self.std = std
     self.input_size = input_size
     self.data_root = data_root
     self.trans = transforms.Compose([
         # transforms.Resize([int(self.input_size[0]), int(self.input_size[1])]),  # smaller side resized
         transforms.Transpose(order=(2, 0, 1)),
         transforms.Normalize(mean=self.mean, std=self.std),
     ])
     self.image_data, self.image_label = self.data_prepare()
     self.num_classes = len(self.image_data)
     self.len = len(self.image_data)
Example #20
    def __init__(self, root_dir, label_file, is_bin=True):
        super(CommonDataset, self).__init__()
        self.root_dir = root_dir
        self.label_file = label_file
        self.full_lines = self.get_file_list(label_file)
        self.delimiter = "\t"
        self.is_bin = is_bin
        self.transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])

        self.num_samples = len(self.full_lines)
Example #21
    def __init__(self, *args, **kwargs):
        super(TestStaticMasks, self).__init__(*args, **kwargs)
        paddle.disable_static()
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        self.train_dataset = paddle.vision.datasets.MNIST(
            mode="train", backend="cv2", transform=transform)
        self.val_dataset = paddle.vision.datasets.MNIST(
            mode="test", backend="cv2", transform=transform)
        self.train_loader = paddle.io.DataLoader(
            self.train_dataset,
            places=paddle.set_device('cpu'),
            return_list=True)

        def _reader():
            for data in self.val_dataset:
                yield data

        self.val_reader = _reader
Example #22
    def prepare_test_data(self, index):
        img_path = self.img_paths[index]

        img = get_img(img_path, self.read_type)
        img_meta = dict(org_img_size=np.array(img.shape[:2]))

        img = scale_aligned_short(img, self.short_size)
        img_meta.update(dict(img_size=np.array(img.shape[:2])))

        img = Image.fromarray(img)
        img = img.convert('RGB')
        img = transforms.ToTensor()(img)
        img = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])(img)

        return img.numpy(), img_meta["img_size"], img_meta["org_img_size"]
Example #23
    def __init__(self, methodName='runTest', param_names=[]):
        super(TestFilterPruner, self).__init__(methodName)
        self._param_names = param_names
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        self.train_dataset = paddle.vision.datasets.MNIST(mode="train",
                                                          backend="cv2",
                                                          transform=transform)
        self.val_dataset = paddle.vision.datasets.MNIST(mode="test",
                                                        backend="cv2",
                                                        transform=transform)

        def _reader():
            for data in self.val_dataset:
                yield data

        self.val_reader = _reader
Example #24
def eval(args):

    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    test_reader = None
    if args.data == "cifar10":
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        val_dataset = paddle.vision.datasets.Cifar10(mode="test",
                                                     backend="cv2",
                                                     transform=transform)
        class_dim = 10
        image_shape = [3, 224, 224]
        pretrain = False
    elif args.data == "imagenet":
        val_dataset = ImageNetDataset("data/ILSVRC2012",
                                      mode='val',
                                      image_size=224,
                                      resize_short_size=256)
        class_dim = 1000
        image_shape = [3, 224, 224]
        pretrain = True
    else:
        raise ValueError("{} is not supported.".format(args.data))
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    inputs = [Input([None] + image_shape, 'float32', name='image')]
    labels = [Input([None, 1], 'int64', name='label')]

    # model definition
    net = models.__dict__[args.model](pretrained=pretrain,
                                      num_classes=class_dim)

    pruner = paddleslim.dygraph.L1NormFilterPruner(net, [1] + image_shape)
    params = get_pruned_params(args, net)
    ratios = {}
    for param in params:
        ratios[param] = args.pruned_ratio
    print("ratios: {}".format(ratios))
    pruner.prune_vars(ratios, [0])

    model = paddle.Model(net, inputs, labels)
    model.prepare(None, paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(topk=(1, 5)))
    model.load(args.checkpoint)
    model.evaluate(eval_data=val_dataset,
                   batch_size=args.batch_size,
                   verbose=1,
                   num_workers=8)
Example #25
 def __init__(self,
              output_path='output_dir',
              weight_path=None,
              use_adjust_brightness=True):
     self.output_path = output_path
     self.input_size = (256, 256)
     self.use_adjust_brightness = use_adjust_brightness
     if weight_path is None:
         vox_cpk_weight_url = 'https://paddlegan.bj.bcebos.com/models/animeganv2_hayao.pdparams'
         weight_path = get_path_from_url(vox_cpk_weight_url)
     self.weight_path = weight_path
     self.generator = self.load_checkpoints()
     self.transform = T.Compose([
         ResizeToScale((256, 256), 32),
         T.Transpose(),
         T.Normalize([127.5, 127.5, 127.5], [127.5, 127.5, 127.5])
     ])
Example #26
    def func_warn_or_error(self):
        with self.assertRaises(ValueError):
            paddle.callbacks.ReduceLROnPlateau(factor=2.0)
        # warning
        paddle.callbacks.ReduceLROnPlateau(mode='1', patience=3, verbose=1)

        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = CustomMnist(mode='train', transform=transform)
        val_dataset = CustomMnist(mode='test', transform=transform)
        net = LeNet()
        optim = paddle.optimizer.Adam(learning_rate=0.001,
                                      parameters=net.parameters())
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        labels = [InputSpec([None, 1], 'int64', 'label')]
        model = Model(net, inputs=inputs, labels=labels)
        model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
        callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='miou',
                                                       patience=3,
                                                       verbose=1)
        model.fit(train_dataset,
                  val_dataset,
                  batch_size=8,
                  log_freq=1,
                  save_freq=10,
                  epochs=1,
                  callbacks=[callbacks])

        optim = paddle.optimizer.Adam(
            learning_rate=paddle.optimizer.lr.PiecewiseDecay([0.001, 0.0001],
                                                             [5, 10]),
            parameters=net.parameters())

        model.prepare(optim, loss=CrossEntropyLoss(), metrics=[Accuracy()])
        callbacks = paddle.callbacks.ReduceLROnPlateau(monitor='acc',
                                                       mode='max',
                                                       patience=3,
                                                       verbose=1,
                                                       cooldown=1)
        model.fit(train_dataset,
                  val_dataset,
                  batch_size=8,
                  log_freq=1,
                  save_freq=10,
                  epochs=3,
                  callbacks=[callbacks])
Example #27
def load_train_test_datasets(dataset_root):
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    resize = transforms.Resize(256)
    rcrop = transforms.RandomCrop((224, 224))
    ccrop = transforms.CenterCrop((224, 224))
    tot = transforms.ToTensor()
    normalize = transforms.Normalize(mean, std)

    train_transforms = transforms.Compose([resize, rcrop, tot, normalize])
    test_transforms = transforms.Compose([resize, ccrop, tot, normalize])

    train_set = DatasetFolder(osp.join(dataset_root, 'train'),
                              transform=train_transforms)
    test_set = DatasetFolder(osp.join(dataset_root, 'test'),
                             transform=test_transforms)

    return train_set, test_set
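A hedged usage sketch; the directory layout (train/ and test/ subfolders of class-named directories, as DatasetFolder expects) and the 'data/flowers' path are assumptions:

# Illustrative only; the dataset path is hypothetical.
import paddle

train_set, test_set = load_train_test_datasets('data/flowers')
train_loader = paddle.io.DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = paddle.io.DataLoader(test_set, batch_size=64)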
Example #28
 def __init__(self, model_path, use_cuda=True, use_static=False):
     self.use_static = use_static
     if not use_static:
         self.net = paddle.jit.load(model_path)
     else:
         place = paddle.CUDAPlace(0)
         self.exe = paddle.static.Executor(place)
         self.static_model = paddle.static.load_inference_model(
             model_path, self.exe)
     logger = logging.getLogger("root.tracker")
     logger.info("Loading weights from {}... Done!".format(model_path))
     # self.net.to(self.device)
     self.size = (64, 128)
     self.norm = transforms.Compose([
         transforms.ToTensor(data_format='HWC'),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                              data_format='HWC'),
     ])
Example #29
 def __init__(self, root_dir):
     super(MXFaceDataset, self).__init__()
     self.transform = transforms.Compose([
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
     ])
     self.root_dir = root_dir
     path_imgrec = os.path.join(root_dir, 'train.rec')
     path_imgidx = os.path.join(root_dir, 'train.idx')
     self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
     s = self.imgrec.read_idx(0)
     header, _ = recordio.unpack(s)
     if header.flag > 0:
         self.header0 = (int(header.label[0]), int(header.label[1]))
         self.imgidx = np.array(range(1, int(header.label[0])))
     else:
         self.imgidx = np.array(list(self.imgrec.keys))
Example #30
    def test_save_load(self):
        paddle.disable_static()
        paddle.set_device('gpu')
        amp_level = {"level": "O1", "init_loss_scaling": 128}
        paddle.seed(2021)
        model = self.get_model(amp_level)
        transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
        train_dataset = MNIST(mode='train', transform=transform)
        model.fit(train_dataset,
                  epochs=1,
                  batch_size=64,
                  num_iters=2,
                  log_freq=1)
        model.save('./lenet_amp')

        with paddle.fluid.unique_name.guard():
            paddle.seed(2021)
            new_model = self.get_model(amp_level)
            train_dataset = MNIST(mode='train', transform=transform)
            new_model.fit(train_dataset,
                          epochs=1,
                          batch_size=64,
                          num_iters=1,
                          log_freq=1)
        # not equal before load
        self.assertNotEqual(new_model._scaler.state_dict()['incr_count'],
                            model._scaler.state_dict()['incr_count'])
        print((new_model._scaler.state_dict()['incr_count'],
               model._scaler.state_dict()['incr_count']))

        # equal after load
        new_model.load('./lenet_amp')
        self.assertEqual(new_model._scaler.state_dict()['incr_count'],
                         model._scaler.state_dict()['incr_count'])
        self.assertEqual(new_model._scaler.state_dict()['decr_count'],
                         model._scaler.state_dict()['decr_count'])
        self.assertTrue(
            np.array_equal(
                new_model._optimizer.state_dict()
                ['conv2d_1.w_0_moment1_0'].numpy(),
                model._optimizer.state_dict()
                ['conv2d_1.w_0_moment1_0'].numpy()))