Example #1
def main(args):
    env_info = get_environ_info()
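    # Run on the current process's GPU when Paddle is compiled with CUDA and
    # GPUs are in use; otherwise fall back to the CPU.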
    places = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
        else fluid.CPUPlace()

    if args.dataset not in DATASETS:
        raise Exception(
            '`--dataset` is invalid. it should be one of {}'.format(
                str(list(DATASETS.keys()))))
    dataset = DATASETS[args.dataset]

    with fluid.dygraph.guard(places):
        test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
        test_dataset = dataset(dataset_root=args.dataset_root,
                               transforms=test_transforms,
                               mode='test')

        model = manager.MODELS[args.model_name](
            num_classes=test_dataset.num_classes)

        infer(model,
              model_dir=args.model_dir,
              test_dataset=test_dataset,
              save_dir=args.save_dir)
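
This example, like the three that follow, is shown without its import block. A minimal sketch of what the snippets rely on: the two framework imports are the standard Paddle 1.x dygraph API, while the project-specific helpers (T, DATASETS, manager, get_environ_info, infer, and later MODELS, logger, train, evaluate) come from the surrounding repository, so the module paths suggested below are assumptions and are left commented out.

# Framework imports (Paddle 1.x "fluid" dygraph API).
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv

# Project-specific helpers used by the examples. The module paths are
# assumptions about the repository layout and may differ in practice.
# import transforms as T
# from cvlibs import manager
# from datasets import DATASETS
# from models import MODELS
# from utils import get_environ_info, logger
# from core import train, evaluate, infer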
Example #2
def main(args):
    env_info = get_environ_info()
    places = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
        else fluid.CPUPlace()

    if args.dataset not in DATASETS:
        raise Exception(
            '`--dataset` is invalid. it should be one of {}'.format(
                str(list(DATASETS.keys()))))
    dataset = DATASETS[args.dataset]

    with fluid.dygraph.guard(places):
        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
        eval_dataset = dataset(dataset_root=args.dataset_root,
                               transforms=eval_transforms,
                               mode='val')

        if args.model_name not in MODELS:
            raise Exception(
                '`--model_name` is invalid. it should be one of {}'.format(
                    str(list(MODELS.keys()))))
        model = MODELS[args.model_name](num_classes=eval_dataset.num_classes)

        evaluate(model,
                 eval_dataset,
                 model_dir=args.model_dir,
                 num_classes=eval_dataset.num_classes)
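
Each main(args) receives a parsed command-line namespace. A hypothetical minimal parser for this example is sketched below; the flag names simply mirror the args.* attributes the function reads, and the defaults are illustrative rather than taken from the original script.

import argparse

def parse_args():
    # Hypothetical parser: one flag per attribute accessed on `args` above.
    parser = argparse.ArgumentParser(description='Model evaluation')
    parser.add_argument('--model_name', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--dataset_root', type=str, default=None)
    parser.add_argument('--model_dir', type=str, required=True)
    parser.add_argument('--input_size', type=int, nargs=2, default=[512, 512])
    return parser.parse_args()

if __name__ == '__main__':
    # `main` is the function defined above.
    main(parse_args())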
Example #3
def main(args):
    env_info = get_environ_info()
    places = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
        else fluid.CPUPlace()

    if args.dataset not in DATASETS:
        raise Exception(
            '`--dataset` is invalid. it should be one of {}'.format(
                str(list(DATASETS.keys()))))
    dataset = DATASETS[args.dataset]

    with fluid.dygraph.guard(places):
        # Create the dataset reader
        train_transforms = T.Compose([
            T.RandomHorizontalFlip(0.5),
            T.ResizeStepScaling(0.5, 2.0, 0.25),
            T.RandomPaddingCrop(args.input_size),
            T.RandomDistort(),
            T.Normalize(),
        ])
        train_dataset = dataset(dataset_root=args.dataset_root,
                                transforms=train_transforms,
                                mode='train')

        eval_dataset = None
        if args.do_eval:
            eval_transforms = T.Compose([T.Normalize()])
            eval_dataset = dataset(dataset_root=args.dataset_root,
                                   transforms=eval_transforms,
                                   mode='val')

        if args.model_name not in MODELS:
            raise Exception(
                '`--model_name` is invalid. it should be one of {}'.format(
                    str(list(MODELS.keys()))))
        model = MODELS[args.model_name](num_classes=train_dataset.num_classes)

        # Create the optimizer
        # TODO: may be one less than len(loader)
        num_steps_each_epoch = len(train_dataset) // (args.batch_size *
                                                      ParallelEnv().nranks)
        decay_step = args.num_epochs * num_steps_each_epoch
        lr_decay = fluid.layers.polynomial_decay(args.learning_rate,
                                                 decay_step,
                                                 end_learning_rate=0,
                                                 power=0.9)
        optimizer = fluid.optimizer.Momentum(
            lr_decay,
            momentum=0.9,
            parameter_list=model.parameters(),
            regularization=fluid.regularizer.L2Decay(
                regularization_coeff=4e-5))

        train(model,
              train_dataset,
              places=places,
              eval_dataset=eval_dataset,
              optimizer=optimizer,
              save_dir=args.save_dir,
              num_epochs=args.num_epochs,
              batch_size=args.batch_size,
              pretrained_model=args.pretrained_model,
              resume_model=args.resume_model,
              save_interval_epochs=args.save_interval_epochs,
              log_steps=args.log_steps,
              num_classes=train_dataset.num_classes,
              num_workers=args.num_workers,
              use_vdl=args.use_vdl)
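
In this example the decay horizon of the polynomial schedule is derived from the dataset: steps per epoch are len(train_dataset) // (batch_size * nranks), and the learning rate decays to zero over num_epochs of those steps with power 0.9. A standalone sketch of that arithmetic, with illustrative numbers that are not taken from the examples:

# Illustrative numbers only.
num_samples = 2975      # size of the training split
batch_size = 4
nranks = 2              # number of data-parallel workers
num_epochs = 100
base_lr = 0.01

num_steps_each_epoch = num_samples // (batch_size * nranks)   # 371
decay_step = num_epochs * num_steps_each_epoch                # 37100

def poly_lr(step, power=0.9, end_lr=0.0):
    # Same shape as fluid.layers.polynomial_decay with cycle=False:
    # the step is clamped to decay_step and the rate decays to end_lr.
    step = min(step, decay_step)
    return (base_lr - end_lr) * (1 - step / decay_step) ** power + end_lr

print(poly_lr(0), poly_lr(decay_step // 2), poly_lr(decay_step))
# 0.01, ~0.0054, 0.0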
Example #4
def main(args):
    env_info = get_environ_info()
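    # Log an "Environment Information" banner: the title centered in a
    # 48-column dashed rule, one "key: value" line per env_info entry, and a
    # closing rule of 48 dashes.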
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['\n', format('Environment Information', '-^48s')] +
                     info + ['-' * 48])
    logger.info(info)

    places = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
        else fluid.CPUPlace()

    if args.dataset not in DATASETS:
        raise Exception(
            '`--dataset` is invalid. it should be one of {}'.format(
                str(list(DATASETS.keys()))))
    dataset = DATASETS[args.dataset]

    with fluid.dygraph.guard(places):
        # Create the dataset reader
        train_transforms = T.Compose([
            T.RandomHorizontalFlip(0.5),
            T.ResizeStepScaling(0.5, 2.0, 0.25),
            T.RandomPaddingCrop(args.input_size),
            T.RandomDistort(),
            T.Normalize(),
        ])
        train_dataset = dataset(dataset_root=args.dataset_root,
                                transforms=train_transforms,
                                mode='train')

        eval_dataset = None
        if args.do_eval:
            eval_transforms = T.Compose([T.Normalize()])
            eval_dataset = dataset(dataset_root=args.dataset_root,
                                   transforms=eval_transforms,
                                   mode='val')

        model = manager.MODELS[args.model_name](
            num_classes=train_dataset.num_classes,
            pretrained_model=args.pretrained_model)

        # Create the optimizer
        # TODO: may be one less than len(loader)
        num_iters_each_epoch = len(train_dataset) // (args.batch_size *
                                                      ParallelEnv().nranks)
        lr_decay = fluid.layers.polynomial_decay(args.learning_rate,
                                                 args.iters,
                                                 end_learning_rate=0,
                                                 power=0.9)
        optimizer = fluid.optimizer.Momentum(
            lr_decay,
            momentum=0.9,
            parameter_list=model.parameters(),
            regularization=fluid.regularizer.L2Decay(
                regularization_coeff=4e-5))

        train(model,
              train_dataset,
              places=places,
              eval_dataset=eval_dataset,
              optimizer=optimizer,
              save_dir=args.save_dir,
              iters=args.iters,
              batch_size=args.batch_size,
              resume_model=args.resume_model,
              save_interval_iters=args.save_interval_iters,
              log_iters=args.log_iters,
              num_classes=train_dataset.num_classes,
              num_workers=args.num_workers,
              use_vdl=args.use_vdl)
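
Example #4 differs from Example #3 in two ways: training is driven by a fixed iteration budget (args.iters, save_interval_iters, log_iters) rather than by epochs, and the model is built through the manager.MODELS registry with pretrained_model passed to the constructor. Throughout these examples DATASETS, MODELS, and manager.MODELS are used as name-to-class mappings; a minimal sketch of that registry pattern, under the assumption that it is essentially a dict keyed by component name, is:

class ComponentMap:
    """Dict-like registry mapping a command-line name to a component class."""

    def __init__(self):
        self._components = {}

    def add_component(self, cls):
        # Register the class under its own name; returning it allows the
        # method to be used as a decorator.
        self._components[cls.__name__] = cls
        return cls

    def __getitem__(self, name):
        return self._components[name]

    def __contains__(self, name):
        return name in self._components

    def keys(self):
        return self._components.keys()


MODELS = ComponentMap()


@MODELS.add_component
class ToyModel:
    # Illustrative stand-in, not a real model from the repository.
    def __init__(self, num_classes, pretrained_model=None):
        self.num_classes = num_classes
        self.pretrained_model = pretrained_model


# Usage mirroring the examples: MODELS['ToyModel'](num_classes=19)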