Example #1
File: reader.py, Project: houj04/PaddleHub
    def __init__(self,
                 input_size: int = 224,
                 scale_size: int = 256,
                 normalize: Optional[dict] = None,
                 pre_transform: bool = True,
                 stage: str = "search",
                 **kwargs) -> None:
        """

        Args:
            input_size:
            scale_size:
            normalize:
            pre_transform:
            **kwargs:
        """

        if normalize is None:
            normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}

        policy = kwargs["policy"]
        assert stage in ["search", "train"]
        train_epochs = kwargs["hp_policy_epochs"]
        self.auto_aug_transform = AutoAugTransform.create(policy, stage=stage, train_epochs=train_epochs)
        #self.auto_aug_transform = PbtAutoAugmentClassiferTransform(conf)
        if pre_transform:
            self.pre_transform = transforms.Resize(input_size)

        self.post_transform = transforms.Compose(
            transforms=[transforms.Permute(),
                        transforms.Normalize(**normalize, channel_first=True)],
            channel_first=False)
        self.cur_epoch = 0
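
The constructor reads "policy" and "hp_policy_epochs" out of **kwargs, so both are required even though they do not appear in the signature. A minimal construction sketch, assuming the class is the PbaAugment referenced in Example #2 and that my_policy is a policy object obtained elsewhere (both names are illustrative):

# Minimal construction sketch; `my_policy` is a hypothetical policy value
# (Example #2 passes a search-space dict for the same parameter).
aug = PbaAugment(
    input_size=224,
    scale_size=256,
    normalize=None,          # falls back to ImageNet mean/std
    pre_transform=True,
    stage="search",          # asserted to be "search" or "train"
    policy=my_policy,        # required, consumed from **kwargs
    hp_policy_epochs=20)     # required, consumed from **kwargs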
Example #2
File: reader.py, Project: houj04/PaddleHub
def _init_loader(hparams: dict, TrainTransform=None) -> tuple:
    """

    Args:
        hparams:

    Returns:

    """
    train_data_root = hparams.data_config.train_img_prefix
    val_data_root = hparams.data_config.val_img_prefix
    train_list = hparams.data_config.train_ann_file
    val_list = hparams.data_config.val_ann_file
    input_size = hparams.task_config.classifier.input_size
    scale_size = hparams.task_config.classifier.scale_size
    search_space = hparams.search_space
    search_space["task_type"] = hparams.task_config.task_type
    epochs = hparams.task_config.classifier.epochs
    no_cache_img = hparams.task_config.classifier.get("no_cache_img", False)

    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}

    if TrainTransform is None:
        TrainTransform = PbaAugment(
            input_size=input_size,
            scale_size=scale_size,
            normalize=normalize,
            policy=search_space,
            hp_policy_epochs=epochs,
        )
    delimiter = hparams.data_config.delimiter
    kwargs = dict(conf=hparams, delimiter=delimiter)

    if hparams.task_config.classifier.use_class_map:
        class_to_id_dict = _read_classes(label_list=hparams.data_config.label_list)
    else:
        class_to_id_dict = None
    train_data = PicReader(
        root_path=train_data_root,
        list_file=train_list,
        transform=TrainTransform,
        class_to_id_dict=class_to_id_dict,
        cache_img=not no_cache_img,
        **kwargs)

    val_data = PicReader(
        root_path=val_data_root,
        list_file=val_list,
        transform=transforms.Compose(
            transforms=[
                transforms.Resize((224, 224)),
                transforms.Permute(),
                transforms.Normalize(**normalize, channel_first=True)
            ],
            channel_first=False),
        class_to_id_dict=class_to_id_dict,
        cache_img=not no_cache_img,
        **kwargs)

    return train_data, val_data
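
_init_loader returns dataset objects rather than loaders. A short sketch of batching them, assuming PicReader is compatible with paddle.io.DataLoader (batch size and shuffle settings are illustrative):

import paddle

train_data, val_data = _init_loader(hparams)
# Assumes PicReader implements __len__/__getitem__ like a paddle.io.Dataset.
train_loader = paddle.io.DataLoader(train_data, batch_size=32, shuffle=True)
val_loader = paddle.io.DataLoader(val_data, batch_size=32, shuffle=False)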
Example #3
    def transforms(self, images: Union[str, np.ndarray]):
        transforms = T.Compose(
            [T.Resize((256, 256)),
             T.CenterCrop(224),
             T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
            to_rgb=True)
        return transforms(images).astype('float32')
Example #4
def valid():
    transforms = T.Compose(
            [T.Resize((256, 256)),
             T.CenterCrop(224),
             T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
            to_rgb=True)

    peach_test = DemoDataset(transforms, mode='test')

    model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])

    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
    trainer.evaluate(peach_test, 16)  # second positional argument is the batch size
Example #5
def preprocess(image_path):
    '''Preprocess an input image file into a np.ndarray.

    Args:
        image_path(str): Path of the input image file.

    Returns:
        numpy.ndarray: Processed image with shape (1, 3, 192, 192).
    '''
    transforms = T.Compose([
        T.Resize((192, 192)),
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ],
        to_rgb=True)
    return np.expand_dims(transforms(image_path), axis=0)
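
The returned array already has the (1, 3, 192, 192) batch layout, so it can be converted to a tensor and fed straight to a network. A usage sketch; 'face.jpg' is a placeholder path and `model` stands for any Paddle model expecting NCHW float32 input:

import paddle

batch = preprocess('face.jpg')                     # placeholder image path
tensor = paddle.to_tensor(batch, dtype='float32')
# output = model(tensor)                           # hypothetical downstream model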
Example #6
def preprocess(image_path):
    '''Preprocess an input image file into a np.ndarray.

    Args:
        image_path(str): Path of the input image file.

    Returns:
        numpy.ndarray: Processed image with shape (1, 3, 224, 224).
    '''
    transforms = T.Compose([
        T.Resize((256, 256)),
        T.CenterCrop(224),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ],
                           to_rgb=True)
    return np.expand_dims(transforms(image_path), axis=0)
Example #7
import paddle
import paddlehub as hub
import paddlehub.vision.transforms as T
from paddlehub.finetune.trainer import Trainer
from paddlehub.datasets import Flowers

if __name__ == '__main__':
    transforms = T.Compose(
        [T.Resize((256, 256)),
         T.CenterCrop(224),
         T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
        to_rgb=True)

    flowers = Flowers(transforms)
    flowers_validate = Flowers(transforms, mode='val')
    model = hub.Module(
        name='resnet50_vd_imagenet_ssld',
        label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"],
        load_checkpoint=None)
    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
    trainer.train(flowers, epochs=100, batch_size=32, eval_dataset=flowers_validate, save_interval=10)
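
After training finishes, the best weights are saved under checkpoint_dir. A prediction sketch, assuming the usual PaddleHub layout of '<checkpoint_dir>/best_model/model.pdparams' (the exact path and the image file are assumptions, not taken from this script):

# Assumed checkpoint layout: Trainer saves the best weights under
# '<checkpoint_dir>/best_model/model.pdparams'.
model = hub.Module(
    name='resnet50_vd_imagenet_ssld',
    label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"],
    load_checkpoint='img_classification_ckpt/best_model/model.pdparams')
result = model.predict(['flower.jpg'])  # placeholder image path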
Example #8
    def __init__(self, load_checkpoint: str = None):
        super(BodyPoseModel, self).__init__()

        self.resize_func = P.ResizeScaling()
        self.norm_func = T.Normalize(std=[1, 1, 1])
        self.pad_func = P.PadDownRight()
        self.remove_pad = P.RemovePadding()
        self.get_peak = P.GetPeak()
        self.get_connection = P.Connection()
        self.get_candidate = P.Candidate()
        self.draw_pose = P.DrawPose()

        no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1', \
                          'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2', \
                          'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1', \
                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
        blocks = {}
        block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]),
                              ('conv1_2', [64, 64, 3, 1, 1]),
                              ('pool1_stage1', [2, 2, 0]),
                              ('conv2_1', [64, 128, 3, 1, 1]),
                              ('conv2_2', [128, 128, 3, 1, 1]),
                              ('pool2_stage1', [2, 2, 0]),
                              ('conv3_1', [128, 256, 3, 1, 1]),
                              ('conv3_2', [256, 256, 3, 1, 1]),
                              ('conv3_3', [256, 256, 3, 1, 1]),
                              ('conv3_4', [256, 256, 3, 1, 1]),
                              ('pool3_stage1', [2, 2, 0]),
                              ('conv4_1', [256, 512, 3, 1, 1]),
                              ('conv4_2', [512, 512, 3, 1, 1]),
                              ('conv4_3_CPM', [512, 256, 3, 1, 1]),
                              ('conv4_4_CPM', [256, 128, 3, 1, 1])])

        block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
                                ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
                                ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
                                ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
                                ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])])

        block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
                                ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
                                ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
                                ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
                                ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
        blocks['block1_1'] = block1_1
        blocks['block1_2'] = block1_2

        self.model0 = self.make_layers(block0, no_relu_layers)

        for i in range(2, 7):
            blocks['block%d_1' % i] = OrderedDict([
                ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
            ])

            blocks['block%d_2' % i] = OrderedDict([
                ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
                ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = self.make_layers(blocks[k], no_relu_layers)

        self.model1_1 = blocks['block1_1']
        self.model2_1 = blocks['block2_1']
        self.model3_1 = blocks['block3_1']
        self.model4_1 = blocks['block4_1']
        self.model5_1 = blocks['block5_1']
        self.model6_1 = blocks['block6_1']

        self.model1_2 = blocks['block1_2']
        self.model2_2 = blocks['block2_2']
        self.model3_2 = blocks['block3_2']
        self.model4_2 = blocks['block4_2']
        self.model5_2 = blocks['block5_2']
        self.model6_2 = blocks['block6_2']

        if load_checkpoint is not None:
            self.model_dict = paddle.load(load_checkpoint)
            self.set_dict(self.model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory, 'openpose_body.pdparams')
            self.model_dict = paddle.load(checkpoint)
            self.set_dict(self.model_dict)
            print("load pretrained checkpoint success")
Example #9
File: train.py, Project: houj04/PaddleHub
    resource_config = config.resource_config
    algo_config = config.algo_config

    input_size = task_config.classifier.input_size
    scale_size = task_config.classifier.scale_size
    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    epochs = task_config.classifier.epochs

    policy = args.policy
    if policy is None:
        print("use normal train transform")
        TrainTransform = transforms.Compose(
            transforms=[
                transforms.Resize((input_size, input_size)),
                transforms.Permute(),
                transforms.Normalize(**normalize, channel_first=True)
            ],
            channel_first=False)
    else:
        TrainTransform = PbaAugment(
            input_size=input_size,
            scale_size=scale_size,
            normalize=normalize,
            policy=policy,
            hp_policy_epochs=epochs,
            stage="train")
    train_dataset, eval_dataset = _init_loader(config, TrainTransform=TrainTransform)
    class_to_id_dict = _read_classes(config.data_config.label_list)
    model = hub.Module(
        name=config.task_config.classifier.model_name, label_list=list(class_to_id_dict.keys()), load_checkpoint=None)
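
The fragment stops after building the model. A plausible continuation, mirroring the Trainer usage shown in Example #7 (the optimizer settings and checkpoint directory are assumptions):

    # Assumed continuation in the style of Example #7; not part of the original script.
    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='pba_train_ckpt', use_gpu=True)
    trainer.train(train_dataset, epochs=epochs, batch_size=32, eval_dataset=eval_dataset)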
Example #10
File: module.py, Project: houj04/PaddleHub
    def __init__(self, load_checkpoint: str = None):
        super(HandPoseModel, self).__init__()

        self.norm_func = T.Normalize(std=[1, 1, 1])
        self.resize_func = P.ResizeScaling()
        self.hand_detect = P.HandDetect()
        self.pad_func = P.PadDownRight()
        self.remove_pad = P.RemovePadding()
        self.draw_pose = P.DrawPose()
        self.draw_hand = P.DrawHandPose()

        no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', \
                          'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']

        block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]),
                                ('conv1_2', [64, 64, 3, 1, 1]),
                                ('pool1_stage1', [2, 2, 0]),
                                ('conv2_1', [64, 128, 3, 1, 1]),
                                ('conv2_2', [128, 128, 3, 1, 1]),
                                ('pool2_stage1', [2, 2, 0]),
                                ('conv3_1', [128, 256, 3, 1, 1]),
                                ('conv3_2', [256, 256, 3, 1, 1]),
                                ('conv3_3', [256, 256, 3, 1, 1]),
                                ('conv3_4', [256, 256, 3, 1, 1]),
                                ('pool3_stage1', [2, 2, 0]),
                                ('conv4_1', [256, 512, 3, 1, 1]),
                                ('conv4_2', [512, 512, 3, 1, 1]),
                                ('conv4_3', [512, 512, 3, 1, 1]),
                                ('conv4_4', [512, 512, 3, 1, 1]),
                                ('conv5_1', [512, 512, 3, 1, 1]),
                                ('conv5_2', [512, 512, 3, 1, 1]),
                                ('conv5_3_CPM', [512, 128, 3, 1, 1])])

        block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]),
                                ('conv6_2_CPM', [512, 22, 1, 1, 0])])

        blocks = {}
        blocks['block1_0'] = block1_0
        blocks['block1_1'] = block1_1

        for i in range(2, 7):
            blocks['block%d' % i] = OrderedDict([
                ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
                ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
                ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
                ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
            ])

        for k in blocks.keys():
            blocks[k] = self.make_layers(blocks[k], no_relu_layers)

        self.model1_0 = blocks['block1_0']
        self.model1_1 = blocks['block1_1']
        self.model2 = blocks['block2']
        self.model3 = blocks['block3']
        self.model4 = blocks['block4']
        self.model5 = blocks['block5']
        self.model6 = blocks['block6']

        if load_checkpoint is not None:
            self.model_dict = paddle.load(load_checkpoint)[0]
            self.set_dict(self.model_dict)
            print("load custom checkpoint success")

        else:
            checkpoint = os.path.join(self.directory, 'openpose_hand.pdparams')
            self.model_dict = paddle.load(checkpoint)
            self.set_dict(self.model_dict)
            print("load pretrained checkpoint success")
        self.body_model = None