Example #1
File: reader.py Project: houj04/PaddleHub
    def __init__(self,
                 input_size: int = 224,
                 scale_size: int = 256,
                 normalize: Optional[dict] = None,
                 pre_transform: bool = True,
                 stage: str = "search",
                 **kwargs) -> None:
        """

        Args:
            input_size:
            scale_size:
            normalize:
            pre_transform:
            **kwargs:
        """

        if normalize is None:
            normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}

        policy = kwargs["policy"]
        assert stage in ["search", "train"]
        train_epochs = kwargs["hp_policy_epochs"]
        self.auto_aug_transform = AutoAugTransform.create(policy, stage=stage, train_epochs=train_epochs)
        #self.auto_aug_transform = PbtAutoAugmentClassiferTransform(conf)
        if pre_transform:
            self.pre_transform = transforms.Resize(input_size)

        self.post_transform = transforms.Compose(
            transforms=[transforms.Permute(),
                        transforms.Normalize(**normalize, channel_first=True)],
            channel_first=False)
        self.cur_epoch = 0
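
Example #3 below passes policy and hp_policy_epochs as keyword arguments when building this transform. A minimal instantiation sketch, assuming this __init__ belongs to the PbaAugment class used there (search_space is a hypothetical policy dict):

aug = PbaAugment(
    input_size=224,
    scale_size=256,
    policy=search_space,      # hypothetical policy/search-space dict
    hp_policy_epochs=10,
    stage='train')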
Example #2
    def transforms(self, images: str) -> np.ndarray:
        transform = T.Compose(
            [T.Resize((256, 256), interpolation='NEAREST'),
             T.RGB2LAB()],
            to_rgb=True)
        return transform(images)
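
The same Resize + RGB2LAB pipeline can also be applied standalone; a minimal sketch, assuming paddlehub.vision.transforms is importable and using a hypothetical image path:

import paddlehub.vision.transforms as T

transform = T.Compose(
    [T.Resize((256, 256), interpolation='NEAREST'),
     T.RGB2LAB()],
    to_rgb=True)
lab = transform('flower.jpg')  # hypothetical path; returns the LAB-converted array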
Example #3
File: reader.py Project: houj04/PaddleHub
def _init_loader(hparams: dict, TrainTransform=None) -> tuple:
    """

    Args:
        hparams:

    Returns:

    """
    train_data_root = hparams.data_config.train_img_prefix
    val_data_root = hparams.data_config.val_img_prefix
    train_list = hparams.data_config.train_ann_file
    val_list = hparams.data_config.val_ann_file
    input_size = hparams.task_config.classifier.input_size
    scale_size = hparams.task_config.classifier.scale_size
    search_space = hparams.search_space
    search_space["task_type"] = hparams.task_config.task_type
    epochs = hparams.task_config.classifier.epochs
    no_cache_img = hparams.task_config.classifier.get("no_cache_img", False)

    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}

    if TrainTransform is None:
        TrainTransform = PbaAugment(
            input_size=input_size,
            scale_size=scale_size,
            normalize=normalize,
            policy=search_space,
            hp_policy_epochs=epochs,
        )
    delimiter = hparams.data_config.delimiter
    kwargs = dict(conf=hparams, delimiter=delimiter)

    if hparams.task_config.classifier.use_class_map:
        class_to_id_dict = _read_classes(label_list=hparams.data_config.label_list)
    else:
        class_to_id_dict = None
    train_data = PicReader(
        root_path=train_data_root,
        list_file=train_list,
        transform=TrainTransform,
        class_to_id_dict=class_to_id_dict,
        cache_img=not no_cache_img,
        **kwargs)

    val_data = PicReader(
        root_path=val_data_root,
        list_file=val_list,
        transform=transforms.Compose(
            transforms=[
                transforms.Resize((224, 224)),
                transforms.Permute(),
                transforms.Normalize(**normalize, channel_first=True)
            ],
            channel_first=False),
        class_to_id_dict=class_to_id_dict,
        cache_img=not no_cache_img,
        **kwargs)

    return train_data, val_data
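
A hedged usage sketch: hparams is assumed to be an attribute-style config object (e.g. an EasyDict) exposing the data_config, task_config and search_space fields read above.

train_data, val_data = _init_loader(hparams)  # hparams: hypothetical config object
print(len(train_data), len(val_data))         # assumes PicReader implements __len__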
Example #4
    def transforms(self, images: Union[str, np.ndarray]):
        transforms = T.Compose(
            [T.Resize((256, 256)),
             T.CenterCrop(224),
             T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
            to_rgb=True)
        return transforms(images).astype('float32')
Example #5
def valid():
    transforms = T.Compose(
            [T.Resize((256, 256)),
             T.CenterCrop(224),
             T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
            to_rgb=True)

    peach_test = DemoDataset(transforms, mode='test')

    model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["R0", "B1", "M2", "S3"])

    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
    trainer.evaluate(peach_test, batch_size=16)
Example #6
def preprocess(image_path):
    '''Preprocess an input image file into a np.ndarray.

    Args:
        image_path(str): Path of the input image file.

    Returns:
        ProcessedImage(numpy.ndarray): A numpy.ndarray with shape
                (1, 3, 192, 192).
    '''
    transforms = T.Compose([
        T.Resize((192, 192)),
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ],
        to_rgb=True)
    return np.expand_dims(transforms(image_path), axis=0)
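
A quick usage sketch (the image file name is hypothetical):

batch = preprocess('face.jpg')  # hypothetical input image
print(batch.shape)              # expected: (1, 3, 192, 192)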
Example #7
def preprocess(image_path):
    '''Preprocess an input image file into a np.ndarray.

    Args:
        image_path(str): Path of the input image file.

    Returns:
        ProcessedImage(numpy.ndarray): A numpy.ndarray with shape
                (1, 3, 224, 224).
    '''
    transforms = T.Compose([
        T.Resize((256, 256)),
        T.CenterCrop(224),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ],
                           to_rgb=True)
    return np.expand_dims(transforms(image_path), axis=0)
Example #8
import paddle
import paddlehub as hub
import paddlehub.vision.transforms as T
from paddlehub.finetune.trainer import Trainer
from paddlehub.datasets import Flowers

if __name__ == '__main__':
    transforms = T.Compose(
        [T.Resize((256, 256)),
         T.CenterCrop(224),
         T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
        to_rgb=True)

    flowers = Flowers(transforms)
    flowers_validate = Flowers(transforms, mode='val')
    model = hub.Module(
        name='resnet50_vd_imagenet_ssld',
        label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"],
        load_checkpoint=None)
    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt', use_gpu=True)
    trainer.train(flowers, epochs=100, batch_size=32, eval_dataset=flowers_validate, save_interval=10)
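
After training, the saved checkpoint can be loaded back for prediction; a hedged sketch following the same finetune flow (the checkpoint and image paths are hypothetical):

model = hub.Module(
    name='resnet50_vd_imagenet_ssld',
    label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"],
    load_checkpoint='/PATH/TO/CHECKPOINT')  # hypothetical checkpoint saved by Trainer
result = model.predict(['flower.jpg'])      # hypothetical image path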
Example #9
File: train.py Project: houj04/PaddleHub
    task_config = config.task_config
    data_config = config.data_config
    resource_config = config.resource_config
    algo_config = config.algo_config

    input_size = task_config.classifier.input_size
    scale_size = task_config.classifier.scale_size
    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    epochs = task_config.classifier.epochs

    policy = args.policy
    if policy is None:
        print("use normal train transform")
        TrainTransform = transforms.Compose(
            transforms=[
                transforms.Resize((input_size, input_size)),
                transforms.Permute(),
                transforms.Normalize(**normalize, channel_first=True)
            ],
            channel_first=False)
    else:
        TrainTransform = PbaAugment(
            input_size=input_size,
            scale_size=scale_size,
            normalize=normalize,
            policy=policy,
            hp_policy_epochs=epochs,
            stage="train")
    train_dataset, eval_dataset = _init_loader(config, TrainTransform=TrainTransform)
    class_to_id_dict = _read_classes(config.data_config.label_list)
    model = hub.Module(
Example #10
File: train.py Project: zzwpower/PaddleHub
import paddle
import paddlehub as hub

from paddlehub.finetune.trainer import Trainer
from paddlehub.datasets.minicoco import MiniCOCO
import paddlehub.vision.transforms as T

if __name__ == "__main__":
    model = hub.Module(name='msgnet')
    transform = T.Compose([T.Resize((256, 256), interpolation='LINEAR')])
    styledata = MiniCOCO(transform)
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001,
                                      parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='test_style_ckpt')
    trainer.train(styledata,
                  epochs=101,
                  batch_size=4,
                  eval_dataset=styledata,
                  log_interval=10,
                  save_interval=10)
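
A hedged prediction sketch for the trained style-transfer model; the predict signature is assumed from the msgnet module's documented usage, and the file names are hypothetical:

model = hub.Module(name='msgnet', load_checkpoint='/PATH/TO/CHECKPOINT')
result = model.predict(origin=['venice-boat.jpg'], style='candy.jpg',
                       visualization=True, save_path='style_transfer')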
Example #11
import paddle
import paddlehub as hub
import paddlehub.vision.transforms as T
from paddlehub.finetune.trainer import Trainer
from paddlehub.datasets import Canvas

if __name__ == '__main__':

    transform = T.Compose(
        [T.Resize((256, 256), interpolation='NEAREST'),
         T.RandomPaddingCrop(crop_size=176),
         T.RGB2LAB()], to_rgb=True)

    color_set = Canvas(transform=transform, mode='train')
    model = hub.Module(name='user_guided_colorization', load_checkpoint='/PATH/TO/CHECKPOINT')

    model.set_config(classification=True, prob=1)
    optimizer = paddle.optimizer.Adam(learning_rate=0.0001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_colorization_ckpt_cls_1')
    trainer.train(color_set, epochs=201, batch_size=25, eval_dataset=color_set, log_interval=10, save_interval=10)

    model.set_config(classification=False, prob=0.125)
    optimizer = paddle.optimizer.Adam(learning_rate=0.00001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_colorization_ckpt_reg_1')
    trainer.train(color_set, epochs=101, batch_size=25, log_interval=10, save_interval=10)
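
A hedged prediction sketch for the trained colorization model; the predict signature is assumed from the user_guided_colorization module's documented usage, and the image path is hypothetical:

model = hub.Module(name='user_guided_colorization', load_checkpoint='/PATH/TO/CHECKPOINT')
model.set_config(prob=0.1)  # assumed inference-time hint probability
result = model.predict(images='house.png', visualization=True, save_path='result')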
Example #12
import paddle
import paddlehub as hub
import paddlehub.vision.transforms as T
from paddlehub.finetune.trainer import Trainer
from paddlehub.datasets import Flowers

if __name__ == '__main__':
    transforms = T.Compose(
        [T.Resize((256, 256)),
         T.CenterCrop(224),
         T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])],
        to_rgb=True)

    flowers = Flowers(transforms)
    flowers_validate = Flowers(transforms, mode='val')
    model = hub.Module(name='resnet50_vd_imagenet_ssld', label_list=["roses", "tulips", "daisy", "sunflowers", "dandelion"], load_checkpoint=None)
    optimizer = paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters())
    trainer = Trainer(model, optimizer, checkpoint_dir='img_classification_ckpt')
    trainer.train(flowers, epochs=100, batch_size=32, eval_dataset=flowers_validate, save_interval=10)